Dataset schema (22 columns; each row below the separator line is one task instance):
instance_id: string (length 10 to 57)
base_commit: string (length 40)
created_at: timestamp (2014-04-30 14:58:36 to 2025-04-30 20:14:11)
environment_setup_commit: string (length 40)
hints_text: string (length 0 to 273k)
patch: string (length 251 to 7.06M)
problem_statement: string (length 11 to 52.5k)
repo: string (length 7 to 53)
test_patch: string (length 231 to 997k)
meta: dict
version: string (851 distinct values)
install_config: dict
requirements: string (length 93 to 34.2k, nullable)
environment: string (length 760 to 20.5k, nullable)
FAIL_TO_PASS: list (length 1 to 9.39k)
FAIL_TO_FAIL: list (length 0 to 2.69k)
PASS_TO_PASS: list (length 0 to 7.87k)
PASS_TO_FAIL: list (length 0 to 192)
license_name: string (55 distinct values)
__index_level_0__: int64 (0 to 21.4k)
before_filepaths: list (length 1 to 105)
after_filepaths: list (length 1 to 105)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dask__dask-2466
| 37c3ae2e091412f5e2fdf3c957c383e89e4c8bb2
| 2017-06-16 14:00:21
| a18308c6384f2a8cd7c0c50749a20bd067842ef9
| jcrist: > Unsure how to test this
Any way to test using moto, and skip if moto not installed?
If this is hard to test, I'd also be fine merging without.
martindurant: Yeah, moto doesn't disallow writing, even when anonymous, so I don't know what parameters I could pass to s3fs that would have any effect, whereas local writes take no parameters at all.
| diff --git a/dask/array/fft.py b/dask/array/fft.py
index cfccda548..2f64c94ee 100644
--- a/dask/array/fft.py
+++ b/dask/array/fft.py
@@ -228,9 +228,9 @@ def _fftfreq_helper(n, d=1.0, chunks=None, real=False):
s = n // 2 + 1 if real else n
t = l - s
- chunks = _normalize_chunks(chunks, (s,))[0] + (t,)
+ chunks = _normalize_chunks(chunks, (s,))
- r = _linspace(0, 1, l, chunks=chunks)
+ r = _linspace(0, 1, l, chunks=(chunks[0] + (t,),))
if real:
n_2 = n // 2 + 1
@@ -239,6 +239,9 @@ def _fftfreq_helper(n, d=1.0, chunks=None, real=False):
n_2 = (n + 1) // 2
r = _concatenate([r[:n_2], r[n_2:-1] - 1])
+ if r.chunks != chunks:
+ r = r.rechunk(chunks)
+
r /= d
return r
diff --git a/dask/bag/core.py b/dask/bag/core.py
index 1cb43c01b..6cc7a8ab6 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -118,7 +118,8 @@ def optimize(dsk, keys, fuse_keys=None, rename_fused_keys=True, **kwargs):
def to_textfiles(b, path, name_function=None, compression='infer',
- encoding=system_encoding, compute=True, get=None):
+ encoding=system_encoding, compute=True, get=None,
+ storage_options=None):
""" Write bag to disk, one filename per partition, one line per element
**Paths**: This will create one file for each partition in your bag. You
@@ -174,7 +175,7 @@ def to_textfiles(b, path, name_function=None, compression='infer',
"""
from dask import delayed
writes = write_bytes(b.to_delayed(), path, name_function, compression,
- encoding=encoding)
+ encoding=encoding, **(storage_options or {}))
# Use Bag optimizations on these delayed objects
dsk = ensure_dict(delayed(writes).dask)
@@ -642,9 +643,10 @@ class Bag(Base):
@wraps(to_textfiles)
def to_textfiles(self, path, name_function=None, compression='infer',
- encoding=system_encoding, compute=True, get=None):
+ encoding=system_encoding, compute=True, get=None,
+ storage_options=None):
return to_textfiles(self, path, name_function, compression, encoding,
- compute, get=get)
+ compute, get=get, storage_options=storage_options)
def fold(self, binop, combine=None, initial=no_default, split_every=None):
""" Parallelizable reduction
diff --git a/dask/dataframe/io/csv.py b/dask/dataframe/io/csv.py
index 9ceaad2e2..c999aa414 100644
--- a/dask/dataframe/io/csv.py
+++ b/dask/dataframe/io/csv.py
@@ -364,7 +364,7 @@ def _to_csv_chunk(df, **kwargs):
def to_csv(df, filename, name_function=None, compression=None, compute=True,
- get=None, **kwargs):
+ get=None, storage_options=None, **kwargs):
"""
Store Dask DataFrame to CSV files
@@ -472,10 +472,12 @@ def to_csv(df, filename, name_function=None, compression=None, compute=True,
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
+ storage_options: dict
+ Parameters passed on to the backend filesystem class.
"""
values = [_to_csv_chunk(d, **kwargs) for d in df.to_delayed()]
values = write_bytes(values, filename, name_function, compression,
- encoding=None)
+ encoding=None, **(storage_options or {}))
if compute:
delayed(values).compute(get=get)
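The hunks above thread a new `storage_options` dict through to `write_bytes`; a minimal usage sketch of the resulting keyword (the bucket name and credentials are placeholders, not values from this PR):
```
import dask.bag as db

b = db.from_sequence(["alpha", "beta", "gamma"], npartitions=2)

# With the patch applied, backend filesystem parameters (e.g. S3 key/secret)
# can be handed straight to to_textfiles; every value here is a dummy.
b.to_textfiles(
    "s3://example-bucket/out-*.txt",
    storage_options={"key": "<aws-access-key>", "secret": "<aws-secret-key>"},
)
```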
| Can't specify storage_options for bag.to_textfiles, dataframe.to_csv
The read functions allow specifying `storage_options` for things like AWS key/secret. The `to_parquet` function allows this as well.
However in other cases such as `bag.to_textfiles()` and `dataframe.to_csv()`, it is not possible to specify these options. It would be nice for there to be consistency, since there is no other way to specify some of the options at this time.
| dask/dask
| diff --git a/dask/array/tests/test_fft.py b/dask/array/tests/test_fft.py
index c02b7fefd..ab6de9560 100644
--- a/dask/array/tests/test_fft.py
+++ b/dask/array/tests/test_fft.py
@@ -9,6 +9,10 @@ import dask.array.fft
from dask.array.fft import fft_wrap
from dask.array.utils import assert_eq
+from dask.array.core import (
+ normalize_chunks as _normalize_chunks,
+)
+
def same_keys(a, b):
def key(k):
@@ -202,7 +206,13 @@ def test_wrap_fftns(modname, funcname, dtype):
@pytest.mark.parametrize("c", [lambda m: m, lambda m: (1, m - 1)])
def test_fftfreq(n, d, c):
c = c(n)
- assert_eq(da.fft.fftfreq(n, d, chunks=c), np.fft.fftfreq(n, d))
+
+ r1 = np.fft.fftfreq(n, d)
+ r2 = da.fft.fftfreq(n, d, chunks=c)
+
+ assert _normalize_chunks(c, r2.shape) == r2.chunks
+
+ assert_eq(r1, r2)
@pytest.mark.parametrize("n", [1, 2, 3, 6, 7])
@@ -210,9 +220,13 @@ def test_fftfreq(n, d, c):
@pytest.mark.parametrize("c", [lambda m: m // 2 + 1, lambda m: (1, m // 2)])
def test_rfftfreq(n, d, c):
c = c(n)
- assert_eq(
- da.fft.rfftfreq(n, d, chunks=c), np.fft.rfftfreq(n, d)
- )
+
+ r1 = np.fft.rfftfreq(n, d)
+ r2 = da.fft.rfftfreq(n, d, chunks=c)
+
+ assert _normalize_chunks(c, r2.shape) == r2.chunks
+
+ assert_eq(r1, r2)
@pytest.mark.parametrize("funcname", ["fftshift", "ifftshift"])
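The added assertions pin down that the chunk specification passed to `fftfreq`/`rfftfreq` survives unchanged; a small sketch of the behaviour being tested (the chunk sizes are arbitrary, and the asserts only hold once the patch above is applied):
```
import numpy as np
import dask.array as da

freqs = da.fft.fftfreq(8, d=0.5, chunks=(1, 7))

# Values agree with numpy, and the requested chunking comes back verbatim
# instead of being widened by an extra trailing chunk.
np.testing.assert_allclose(freqs.compute(), np.fft.fftfreq(8, 0.5))
assert freqs.chunks == ((1, 7),)
```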
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 1.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
-e git+https://github.com/dask/dask.git@37c3ae2e091412f5e2fdf3c957c383e89e4c8bb2#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-1.0-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-0.5-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>0-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-1]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-1]"
]
| []
| [
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[fft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[ifft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[rfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[irfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[hfft]",
"dask/array/tests/test_fft.py::test_cant_fft_chunked_axis[ihfft]",
"dask/array/tests/test_fft.py::test_fft[fft]",
"dask/array/tests/test_fft.py::test_fft[ifft]",
"dask/array/tests/test_fft.py::test_fft[rfft]",
"dask/array/tests/test_fft.py::test_fft[irfft]",
"dask/array/tests/test_fft.py::test_fft[hfft]",
"dask/array/tests/test_fft.py::test_fft[ihfft]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[fft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[ifft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[fftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[ifftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[rfft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[irfft2]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[rfftn]",
"dask/array/tests/test_fft.py::test_fft2n_shapes[irfftn]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[fft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[ifft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[rfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[irfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[hfft]",
"dask/array/tests/test_fft.py::test_fft_n_kwarg[ihfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[fft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[ifft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[rfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[irfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[hfft]",
"dask/array/tests/test_fft.py::test_fft_consistent_names[ihfft]",
"dask/array/tests/test_fft.py::test_wrap_bad_kind",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-fft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-ifft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-fftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-ifftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-rfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-irfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-rfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float32-irfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-fft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-ifft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-fftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-ifftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-rfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-irfft2]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-rfftn]",
"dask/array/tests/test_fft.py::test_nd_ffts_axes[float64-irfftn]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-fft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-ifft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-rfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-irfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-hfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float32-ihfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-fft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-ifft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-rfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-irfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-hfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_ffts[float64-ihfft-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-fft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-ifft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-fftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-ifftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-rfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-irfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-rfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float32-irfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-fft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-ifft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-fftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-ifftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-rfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-irfft2-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-rfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_wrap_fftns[float64-irfftn-numpy.fft]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-1.0-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-0.5-7]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_fftfreq[<lambda>1-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-1.0-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-0.5-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>0-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-1.0-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-0.5-7]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-1]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-2]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-3]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-6]",
"dask/array/tests/test_fft.py::test_rfftfreq[<lambda>1-6.283185307179586-7]",
"dask/array/tests/test_fft.py::test_fftshift[None-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[None-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[0-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[0-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[1-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[1-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[2-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[2-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes4-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes4-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes5-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes5-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes6-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes6-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes7-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift[axes7-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[None-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[None-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[0-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[0-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[1-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[1-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[2-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[2-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes4-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes4-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes5-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes5-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes6-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes6-ifftshift-fftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes7-fftshift-ifftshift]",
"dask/array/tests/test_fft.py::test_fftshift_identity[axes7-ifftshift-fftshift]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,380 | [
"dask/array/fft.py",
"dask/bag/core.py",
"dask/dataframe/io/csv.py"
]
| [
"dask/array/fft.py",
"dask/bag/core.py",
"dask/dataframe/io/csv.py"
]
|
dask__dask-2467
| ecdae84aa3b8ee5408876b0b7d7eade3d127e177
| 2017-06-16 16:29:49
| c560965c8fc0da7cbc0920d43b7011d2721307d3
| mrocklin: This looks good to me. I was surprised to see that serializing `(slice, [...])` was as fast (or faster) as serializing `slice(...)`
| diff --git a/dask/dataframe/io/io.py b/dask/dataframe/io/io.py
index 93b47bb3c..5c82683aa 100644
--- a/dask/dataframe/io/io.py
+++ b/dask/dataframe/io/io.py
@@ -343,7 +343,7 @@ def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
def from_dask_array(x, columns=None):
- """ Create Dask Array from a Dask DataFrame
+ """ Create a Dask DataFrame from a Dask Array.
Converts a 2d array into a DataFrame and a 1d array into a Series.
diff --git a/dask/delayed.py b/dask/delayed.py
index a9cd531e0..8f0943d0d 100644
--- a/dask/delayed.py
+++ b/dask/delayed.py
@@ -64,26 +64,33 @@ def to_task_dask(expr):
"""
if isinstance(expr, Delayed):
return expr.key, expr.dask
+
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(ensure_dict(expr.dask), keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, dsk
- if isinstance(expr, tuple) and type(expr) != tuple:
- return expr, {}
- if isinstance(expr, (Iterator, list, tuple, set)):
+
+ if isinstance(expr, Iterator):
+ expr = list(expr)
+ typ = type(expr)
+
+ if typ in (list, tuple, set):
args, dasks = unzip((to_task_dask(e) for e in expr), 2)
args = list(args)
dsk = sharedict.merge(*dasks)
# Ensure output type matches input type
- if isinstance(expr, (tuple, set)):
- return (type(expr), args), dsk
- else:
- return args, dsk
- if type(expr) is dict:
+ return (args, dsk) if typ is list else ((typ, args), dsk)
+
+ if typ is dict:
args, dsk = to_task_dask([[k, v] for k, v in expr.items()])
return (dict, args), dsk
+
+ if typ is slice:
+ args, dsk = to_task_dask([expr.start, expr.stop, expr.step])
+ return (slice,) + tuple(args), dsk
+
return expr, {}
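The new `slice` branch above unpacks a plain slice whose components are delayed values into a `(slice, start, stop, step)` task; a short sketch of what that enables, mirroring the issue below and the added `a[:b]` test:
```
import numpy as np
from dask import delayed

a = delayed(np.arange(10))
b = delayed(2)

# Previously this raised TypeError because the delayed stop value reached
# Python's slice machinery unresolved; with the branch above it computes.
print(a[:b].compute())   # -> [0 1]
```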
| Slicing delayed objects with delayed objects does not work
I'm trying to wrap my code with delayed, using `dask 0.15.0`, `python 3.6.0` and `numpy 1.12.1`, but I stumbled on an exception.
```
from dask import delayed
import numpy as np
a = delayed(np.arange(10))
b = delayed(2)
a[:b].compute()
```
results in: `TypeError: slice indices must be integers or None or have an __index__ method`
```
a[delayed(slice)(None,b)].compute()
```
Works, even only calling `slice(None,b)` gives no error, but then again `a[slice(None,b)]` gives an identical error. Isn't it to be expected that a delayed object would wrap its own slice objects when sliced?
| dask/dask
| diff --git a/dask/tests/test_delayed.py b/dask/tests/test_delayed.py
| dask/dask | diff --git a/dask/tests/test_delayed.py b/dask/tests/test_delayed.py
index 837e739a4..8c89cca23 100644
--- a/dask/tests/test_delayed.py
+++ b/dask/tests/test_delayed.py
@@ -1,4 +1,4 @@
-from collections import Iterator, namedtuple
+from collections import namedtuple
from operator import add, setitem
import pickle
from random import random
@@ -32,6 +32,10 @@ def test_to_task_dask():
assert task == x
assert dict(dask) == {}
+ task, dask = to_task_dask(slice(a, b, 3))
+ assert task == (slice, 'a', 'b', 3)
+ assert dict(dask) == merge(a.dask, b.dask)
+
# Issue https://github.com/dask/dask/issues/2107
class MyClass(dict):
pass
@@ -58,6 +62,8 @@ def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
+ b = delayed(2)
+ assert a[:b].compute() == [1, 2]
a = delayed(10)
assert (a + 1).compute() == 11
@@ -154,7 +160,6 @@ def test_lists_are_concrete():
assert c.compute() == 20
[email protected]
def test_iterators():
a = delayed(1)
b = delayed(2)
@@ -163,7 +168,6 @@ def test_iterators():
assert c.compute() == 3
def f(seq):
- assert isinstance(seq, Iterator)
return sum(seq)
c = delayed(f)(iter([a, b]))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
botocore==1.23.24
certifi==2021.5.30
charset-normalizer==3.0.1
click==8.0.4
cloudpickle==2.2.1
coverage==6.2
-e git+https://github.com/dask/dask.git@ecdae84aa3b8ee5408876b0b7d7eade3d127e177#egg=dask
distributed==1.19.3
execnet==1.9.0
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==0.10.0
locket==1.0.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
s3fs==2022.1.0
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wrapt==1.16.0
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- botocore==1.23.24
- charset-normalizer==3.0.1
- click==8.0.4
- cloudpickle==2.2.1
- coverage==6.2
- distributed==1.19.3
- execnet==1.9.0
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jmespath==0.10.0
- locket==1.0.0
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- s3fs==2022.1.0
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- wrapt==1.16.0
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/tests/test_delayed.py::test_to_task_dask",
"dask/tests/test_delayed.py::test_operators"
]
| []
| [
"dask/tests/test_delayed.py::test_delayed",
"dask/tests/test_delayed.py::test_methods",
"dask/tests/test_delayed.py::test_attributes",
"dask/tests/test_delayed.py::test_method_getattr_optimize",
"dask/tests/test_delayed.py::test_delayed_errors",
"dask/tests/test_delayed.py::test_common_subexpressions",
"dask/tests/test_delayed.py::test_lists",
"dask/tests/test_delayed.py::test_literates",
"dask/tests/test_delayed.py::test_literates_keys",
"dask/tests/test_delayed.py::test_lists_are_concrete",
"dask/tests/test_delayed.py::test_iterators",
"dask/tests/test_delayed.py::test_traverse_false",
"dask/tests/test_delayed.py::test_pure",
"dask/tests/test_delayed.py::test_pure_global_setting",
"dask/tests/test_delayed.py::test_nout",
"dask/tests/test_delayed.py::test_kwargs",
"dask/tests/test_delayed.py::test_array_delayed",
"dask/tests/test_delayed.py::test_array_bag_delayed",
"dask/tests/test_delayed.py::test_delayed_picklable",
"dask/tests/test_delayed.py::test_delayed_compute_forward_kwargs",
"dask/tests/test_delayed.py::test_delayed_method_descriptor",
"dask/tests/test_delayed.py::test_delayed_callable",
"dask/tests/test_delayed.py::test_delayed_name_on_call",
"dask/tests/test_delayed.py::test_callable_obj",
"dask/tests/test_delayed.py::test_name_consistent_across_instances",
"dask/tests/test_delayed.py::test_sensitive_to_partials",
"dask/tests/test_delayed.py::test_delayed_name",
"dask/tests/test_delayed.py::test_finalize_name"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,381 | [
"dask/delayed.py",
"dask/dataframe/io/io.py"
]
| [
"dask/delayed.py",
"dask/dataframe/io/io.py"
]
|
dask__dask-2468
| ecdae84aa3b8ee5408876b0b7d7eade3d127e177
| 2017-06-16 16:53:44
| c560965c8fc0da7cbc0920d43b7011d2721307d3
| diff --git a/dask/bag/core.py b/dask/bag/core.py
index aed4e6ca2..1cb43c01b 100644
--- a/dask/bag/core.py
+++ b/dask/bag/core.py
@@ -1987,5 +1987,5 @@ def split(seq, n):
def to_dataframe(seq, columns, dtypes):
import pandas as pd
- res = pd.DataFrame(seq, columns=list(columns))
+ res = pd.DataFrame(reify(seq), columns=list(columns))
return res.astype(dtypes, copy=False)
diff --git a/dask/dataframe/io/io.py b/dask/dataframe/io/io.py
index 93b47bb3c..5c82683aa 100644
--- a/dask/dataframe/io/io.py
+++ b/dask/dataframe/io/io.py
@@ -343,7 +343,7 @@ def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
def from_dask_array(x, columns=None):
- """ Create Dask Array from a Dask DataFrame
+ """ Create a Dask DataFrame from a Dask Array.
Converts a 2d array into a DataFrame and a 1d array into a Series.
diff --git a/dask/delayed.py b/dask/delayed.py
index a9cd531e0..8f0943d0d 100644
--- a/dask/delayed.py
+++ b/dask/delayed.py
@@ -64,26 +64,33 @@ def to_task_dask(expr):
"""
if isinstance(expr, Delayed):
return expr.key, expr.dask
+
if isinstance(expr, base.Base):
name = 'finalize-' + tokenize(expr, pure=True)
keys = expr._keys()
dsk = expr._optimize(ensure_dict(expr.dask), keys)
dsk[name] = (expr._finalize, (concrete, keys))
return name, dsk
- if isinstance(expr, tuple) and type(expr) != tuple:
- return expr, {}
- if isinstance(expr, (Iterator, list, tuple, set)):
+
+ if isinstance(expr, Iterator):
+ expr = list(expr)
+ typ = type(expr)
+
+ if typ in (list, tuple, set):
args, dasks = unzip((to_task_dask(e) for e in expr), 2)
args = list(args)
dsk = sharedict.merge(*dasks)
# Ensure output type matches input type
- if isinstance(expr, (tuple, set)):
- return (type(expr), args), dsk
- else:
- return args, dsk
- if type(expr) is dict:
+ return (args, dsk) if typ is list else ((typ, args), dsk)
+
+ if typ is dict:
args, dsk = to_task_dask([[k, v] for k, v in expr.items()])
return (dict, args), dsk
+
+ if typ is slice:
+ args, dsk = to_task_dask([expr.start, expr.stop, expr.step])
+ return (slice,) + tuple(args), dsk
+
return expr, {}
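The `reify(seq)` call above materializes iterator-backed partitions before they reach `pd.DataFrame`; a sketch of the round trip from the issue below that this change is meant to unbreak:
```
import dask.bag as db

# flatten() hands to_dataframe an iterator per partition; with reify() in
# place the bag -> dataframe -> bag round trip completes instead of raising.
result = (
    db.from_sequence(5 * [range(10)])
      .flatten()
      .to_dataframe()
      .to_bag()
      .compute()
)
print(len(result))   # 50 elements, 0..9 repeated five times
```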
| Can't convert bag -> dataframe -> bag after flatten()
When I try to use `flatten()/concat()` on a bag and then proceed to convert to dataframe and back, the `pd.DataFrame` constructor fails because the data is passed in as an Iterator. See example below. Commenting out `.flatten()` or `.to_bag()` both cause it to be successful. But as written it causes an exception.
```
import dask.bag as db
(db
.from_sequence(5 * [range(10)])
.flatten()
.to_dataframe()
.to_bag()
.compute()
)
```
```
TypeError Traceback (most recent call last)
<ipython-input-1-31fb669b888e> in <module>()
2
3 (db
----> 4 .from_sequence(5 * [range(10)])
5 .flatten()
6 .to_dataframe()
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/base.pyc in compute(self, **kwargs)
95 Extra keywords to forward to the scheduler ``get`` function.
96 """
---> 97 (result,) = compute(self, traverse=False, **kwargs)
98 return result
99
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/base.pyc in compute(*args, **kwargs)
202 dsk = collections_to_dsk(variables, optimize_graph, **kwargs)
203 keys = [var._keys() for var in variables]
--> 204 results = get(dsk, keys, **kwargs)
205
206 results_iter = iter(results)
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/multiprocessing.pyc in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, **kwargs)
175 get_id=_process_get_id, dumps=dumps, loads=loads,
176 pack_exception=pack_exception,
--> 177 raise_exception=reraise, **kwargs)
178 finally:
179 if cleanup:
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
519 _execute_task(task, data) # Re-execute locally
520 else:
--> 521 raise_exception(exc, tb)
522 res, worker_id = loads(res_info)
523 state['cache'][key] = res
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in execute_task()
288 try:
289 task, data = loads(task_info)
--> 290 result = _execute_task(task, data)
291 id = get_id()
292 result = dumps((result, id))
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in _execute_task()
268 elif istask(arg):
269 func, args = arg[0], arg[1:]
--> 270 args2 = [_execute_task(a, cache) for a in args]
271 return func(*args2)
272 elif not ishashable(arg):
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/local.pyc in _execute_task()
269 func, args = arg[0], arg[1:]
270 args2 = [_execute_task(a, cache) for a in args]
--> 271 return func(*args2)
272 elif not ishashable(arg):
273 return arg
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/dask/bag/core.pyc in to_dataframe()
1988 def to_dataframe(seq, columns, dtypes):
1989 import pandas as pd
-> 1990 res = pd.DataFrame(seq, columns=list(columns))
1991 return res.astype(dtypes, copy=False)
/Users/lsglick/.virtualenvs/test/lib/python2.7/site-packages/pandas/core/frame.pyc in __init__()
323 mgr = self._init_dict({}, index, columns, dtype=dtype)
324 elif isinstance(data, collections.Iterator):
--> 325 raise TypeError("data argument can't be an iterator")
326 else:
327 try:
TypeError: data argument can't be an iterator
```
Versions:
Python: 3.5.2
Pandas: 0.20.2
Dask: 0.15.0
AND
Python: 2.7.10
Pandas: 0.19.2
Dask: 0.15.0
| dask/dask
| diff --git a/dask/bag/tests/test_bag.py b/dask/bag/tests/test_bag.py
index bd3679cbe..0053dc0b6 100644
--- a/dask/bag/tests/test_bag.py
+++ b/dask/bag/tests/test_bag.py
@@ -822,6 +822,13 @@ def test_to_dataframe():
dd.utils.assert_eq(df, sol, check_index=False)
check_parts(df, sol)
+ # Works with iterators
+ b = db.from_sequence(range(100), npartitions=5).map_partitions(iter)
+ sol = pd.DataFrame({'a': range(100)})
+ df = b.to_dataframe(columns=sol)
+ dd.utils.assert_eq(df, sol, check_index=False)
+ check_parts(df, sol)
+
ext_open = [('gz', GzipFile), ('', open)]
if not PY2:
diff --git a/dask/tests/test_delayed.py b/dask/tests/test_delayed.py
index 837e739a4..8c89cca23 100644
--- a/dask/tests/test_delayed.py
+++ b/dask/tests/test_delayed.py
@@ -1,4 +1,4 @@
-from collections import Iterator, namedtuple
+from collections import namedtuple
from operator import add, setitem
import pickle
from random import random
@@ -32,6 +32,10 @@ def test_to_task_dask():
assert task == x
assert dict(dask) == {}
+ task, dask = to_task_dask(slice(a, b, 3))
+ assert task == (slice, 'a', 'b', 3)
+ assert dict(dask) == merge(a.dask, b.dask)
+
# Issue https://github.com/dask/dask/issues/2107
class MyClass(dict):
pass
@@ -58,6 +62,8 @@ def test_operators():
a = delayed([1, 2, 3])
assert a[0].compute() == 1
assert (a + a).compute() == [1, 2, 3, 1, 2, 3]
+ b = delayed(2)
+ assert a[:b].compute() == [1, 2]
a = delayed(10)
assert (a + 1).compute() == 11
@@ -154,7 +160,6 @@ def test_lists_are_concrete():
assert c.compute() == 20
[email protected]
def test_iterators():
a = delayed(1)
b = delayed(2)
@@ -163,7 +168,6 @@ def test_iterators():
assert c.compute() == 3
def f(seq):
- assert isinstance(seq, Iterator)
return sum(seq)
c = delayed(f)(iter([a, b]))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"moto",
"mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiobotocore==2.1.2
aiohttp==3.8.6
aioitertools==0.11.0
aiosignal==1.2.0
async-timeout==4.0.2
asynctest==0.13.0
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.23.24
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
cloudpickle==2.2.1
cryptography==40.0.2
-e git+https://github.com/dask/dask.git@ecdae84aa3b8ee5408876b0b7d7eade3d127e177#egg=dask
dataclasses==0.8
distributed==1.19.3
frozenlist==1.2.0
fsspec==2022.1.0
HeapDict==1.0.1
idna==3.10
idna-ssl==1.1.0
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
jmespath==0.10.0
locket==1.0.0
MarkupSafe==2.0.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
moto==4.0.13
msgpack-python==0.5.6
multidict==5.2.0
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
responses==0.17.0
s3fs==2022.1.0
s3transfer==0.5.2
six==1.17.0
sortedcontainers==2.4.0
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
Werkzeug==2.0.3
wrapt==1.16.0
xmltodict==0.14.2
yarl==1.7.2
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aiobotocore==2.1.2
- aiohttp==3.8.6
- aioitertools==0.11.0
- aiosignal==1.2.0
- async-timeout==4.0.2
- asynctest==0.13.0
- boto3==1.23.10
- botocore==1.23.24
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- cloudpickle==2.2.1
- cryptography==40.0.2
- dataclasses==0.8
- distributed==1.19.3
- frozenlist==1.2.0
- fsspec==2022.1.0
- heapdict==1.0.1
- idna==3.10
- idna-ssl==1.1.0
- jinja2==3.0.3
- jmespath==0.10.0
- locket==1.0.0
- markupsafe==2.0.1
- mock==5.2.0
- moto==4.0.13
- msgpack-python==0.5.6
- multidict==5.2.0
- numpy==1.19.5
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pycparser==2.21
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- responses==0.17.0
- s3fs==2022.1.0
- s3transfer==0.5.2
- six==1.17.0
- sortedcontainers==2.4.0
- tblib==1.7.0
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- werkzeug==2.0.3
- wrapt==1.16.0
- xmltodict==0.14.2
- yarl==1.7.2
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/tests/test_delayed.py::test_to_task_dask",
"dask/tests/test_delayed.py::test_operators"
]
| [
"dask/bag/tests/test_bag.py::test_to_dataframe"
]
| [
"dask/bag/tests/test_bag.py::test_Bag",
"dask/bag/tests/test_bag.py::test_keys",
"dask/bag/tests/test_bag.py::test_map",
"dask/bag/tests/test_bag.py::test_map_function_with_multiple_arguments",
"dask/bag/tests/test_bag.py::test_map_with_constructors",
"dask/bag/tests/test_bag.py::test_map_with_builtins",
"dask/bag/tests/test_bag.py::test_map_with_kwargs",
"dask/bag/tests/test_bag.py::test_bag_map",
"dask/bag/tests/test_bag.py::test_map_method",
"dask/bag/tests/test_bag.py::test_starmap",
"dask/bag/tests/test_bag.py::test_filter",
"dask/bag/tests/test_bag.py::test_remove",
"dask/bag/tests/test_bag.py::test_iter",
"dask/bag/tests/test_bag.py::test_repr[str]",
"dask/bag/tests/test_bag.py::test_repr[repr]",
"dask/bag/tests/test_bag.py::test_pluck",
"dask/bag/tests/test_bag.py::test_pluck_with_default",
"dask/bag/tests/test_bag.py::test_unzip",
"dask/bag/tests/test_bag.py::test_fold",
"dask/bag/tests/test_bag.py::test_distinct",
"dask/bag/tests/test_bag.py::test_frequencies",
"dask/bag/tests/test_bag.py::test_topk",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[1]",
"dask/bag/tests/test_bag.py::test_topk_with_non_callable_key[2]",
"dask/bag/tests/test_bag.py::test_topk_with_multiarg_lambda",
"dask/bag/tests/test_bag.py::test_lambdas",
"dask/bag/tests/test_bag.py::test_reductions",
"dask/bag/tests/test_bag.py::test_reduction_names",
"dask/bag/tests/test_bag.py::test_tree_reductions",
"dask/bag/tests/test_bag.py::test_aggregation[1]",
"dask/bag/tests/test_bag.py::test_aggregation[3]",
"dask/bag/tests/test_bag.py::test_aggregation[4]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[1]",
"dask/bag/tests/test_bag.py::test_non_splittable_reductions[10]",
"dask/bag/tests/test_bag.py::test_std",
"dask/bag/tests/test_bag.py::test_var",
"dask/bag/tests/test_bag.py::test_join",
"dask/bag/tests/test_bag.py::test_foldby",
"dask/bag/tests/test_bag.py::test_map_partitions",
"dask/bag/tests/test_bag.py::test_map_partitions_args_kwargs",
"dask/bag/tests/test_bag.py::test_random_sample_size",
"dask/bag/tests/test_bag.py::test_random_sample_prob_range",
"dask/bag/tests/test_bag.py::test_random_sample_repeated_computation",
"dask/bag/tests/test_bag.py::test_random_sample_different_definitions",
"dask/bag/tests/test_bag.py::test_random_sample_random_state",
"dask/bag/tests/test_bag.py::test_lazify_task",
"dask/bag/tests/test_bag.py::test_lazify",
"dask/bag/tests/test_bag.py::test_inline_singleton_lists",
"dask/bag/tests/test_bag.py::test_take",
"dask/bag/tests/test_bag.py::test_take_npartitions",
"dask/bag/tests/test_bag.py::test_take_npartitions_warn",
"dask/bag/tests/test_bag.py::test_map_is_lazy",
"dask/bag/tests/test_bag.py::test_can_use_dict_to_make_concrete",
"dask/bag/tests/test_bag.py::test_read_text",
"dask/bag/tests/test_bag.py::test_read_text_large",
"dask/bag/tests/test_bag.py::test_read_text_encoding",
"dask/bag/tests/test_bag.py::test_read_text_large_gzip",
"dask/bag/tests/test_bag.py::test_from_sequence",
"dask/bag/tests/test_bag.py::test_from_long_sequence",
"dask/bag/tests/test_bag.py::test_product",
"dask/bag/tests/test_bag.py::test_partition_collect",
"dask/bag/tests/test_bag.py::test_groupby",
"dask/bag/tests/test_bag.py::test_groupby_with_indexer",
"dask/bag/tests/test_bag.py::test_groupby_with_npartitions_changed",
"dask/bag/tests/test_bag.py::test_concat",
"dask/bag/tests/test_bag.py::test_flatten",
"dask/bag/tests/test_bag.py::test_concat_after_map",
"dask/bag/tests/test_bag.py::test_args",
"dask/bag/tests/test_bag.py::test_to_textfiles[gz-GzipFile]",
"dask/bag/tests/test_bag.py::test_to_textfiles[-open]",
"dask/bag/tests/test_bag.py::test_to_textfiles[bz2-BZ2File]",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_preserves_order",
"dask/bag/tests/test_bag.py::test_to_textfiles_name_function_warn",
"dask/bag/tests/test_bag.py::test_to_textfiles_encoding",
"dask/bag/tests/test_bag.py::test_to_textfiles_inputs",
"dask/bag/tests/test_bag.py::test_to_textfiles_endlines",
"dask/bag/tests/test_bag.py::test_string_namespace",
"dask/bag/tests/test_bag.py::test_string_namespace_with_unicode",
"dask/bag/tests/test_bag.py::test_str_empty_split",
"dask/bag/tests/test_bag.py::test_map_with_iterator_function",
"dask/bag/tests/test_bag.py::test_ensure_compute_output_is_concrete",
"dask/bag/tests/test_bag.py::test_bag_class_extend",
"dask/bag/tests/test_bag.py::test_gh715",
"dask/bag/tests/test_bag.py::test_bag_compute_forward_kwargs",
"dask/bag/tests/test_bag.py::test_to_delayed",
"dask/bag/tests/test_bag.py::test_to_delayed_optimizes",
"dask/bag/tests/test_bag.py::test_from_delayed",
"dask/bag/tests/test_bag.py::test_from_delayed_iterator",
"dask/bag/tests/test_bag.py::test_range",
"dask/bag/tests/test_bag.py::test_zip[1]",
"dask/bag/tests/test_bag.py::test_zip[7]",
"dask/bag/tests/test_bag.py::test_zip[10]",
"dask/bag/tests/test_bag.py::test_zip[28]",
"dask/bag/tests/test_bag.py::test_repartition[1-1]",
"dask/bag/tests/test_bag.py::test_repartition[1-2]",
"dask/bag/tests/test_bag.py::test_repartition[1-7]",
"dask/bag/tests/test_bag.py::test_repartition[1-11]",
"dask/bag/tests/test_bag.py::test_repartition[1-23]",
"dask/bag/tests/test_bag.py::test_repartition[2-1]",
"dask/bag/tests/test_bag.py::test_repartition[2-2]",
"dask/bag/tests/test_bag.py::test_repartition[2-7]",
"dask/bag/tests/test_bag.py::test_repartition[2-11]",
"dask/bag/tests/test_bag.py::test_repartition[2-23]",
"dask/bag/tests/test_bag.py::test_repartition[5-1]",
"dask/bag/tests/test_bag.py::test_repartition[5-2]",
"dask/bag/tests/test_bag.py::test_repartition[5-7]",
"dask/bag/tests/test_bag.py::test_repartition[5-11]",
"dask/bag/tests/test_bag.py::test_repartition[5-23]",
"dask/bag/tests/test_bag.py::test_repartition[12-1]",
"dask/bag/tests/test_bag.py::test_repartition[12-2]",
"dask/bag/tests/test_bag.py::test_repartition[12-7]",
"dask/bag/tests/test_bag.py::test_repartition[12-11]",
"dask/bag/tests/test_bag.py::test_repartition[12-23]",
"dask/bag/tests/test_bag.py::test_repartition[23-1]",
"dask/bag/tests/test_bag.py::test_repartition[23-2]",
"dask/bag/tests/test_bag.py::test_repartition[23-7]",
"dask/bag/tests/test_bag.py::test_repartition[23-11]",
"dask/bag/tests/test_bag.py::test_repartition[23-23]",
"dask/bag/tests/test_bag.py::test_repartition_names",
"dask/bag/tests/test_bag.py::test_accumulate",
"dask/bag/tests/test_bag.py::test_groupby_tasks",
"dask/bag/tests/test_bag.py::test_groupby_tasks_names",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[1000-20-100]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_2[12345-234-1042]",
"dask/bag/tests/test_bag.py::test_groupby_tasks_3",
"dask/bag/tests/test_bag.py::test_to_textfiles_empty_partitions",
"dask/bag/tests/test_bag.py::test_reduction_empty",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[1]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[2]",
"dask/bag/tests/test_bag.py::test_reduction_empty_aggregate[4]",
"dask/bag/tests/test_bag.py::test_reduction_with_non_comparable_objects",
"dask/bag/tests/test_bag.py::test_empty",
"dask/bag/tests/test_bag.py::test_bag_picklable",
"dask/bag/tests/test_bag.py::test_msgpack_unicode",
"dask/bag/tests/test_bag.py::test_bag_with_single_callable",
"dask/bag/tests/test_bag.py::test_optimize_fuse_keys",
"dask/bag/tests/test_bag.py::test_reductions_are_lazy",
"dask/bag/tests/test_bag.py::test_repeated_groupby",
"dask/bag/tests/test_bag.py::test_temporary_directory",
"dask/bag/tests/test_bag.py::test_empty_bag",
"dask/tests/test_delayed.py::test_delayed",
"dask/tests/test_delayed.py::test_methods",
"dask/tests/test_delayed.py::test_attributes",
"dask/tests/test_delayed.py::test_method_getattr_optimize",
"dask/tests/test_delayed.py::test_delayed_errors",
"dask/tests/test_delayed.py::test_common_subexpressions",
"dask/tests/test_delayed.py::test_lists",
"dask/tests/test_delayed.py::test_literates",
"dask/tests/test_delayed.py::test_literates_keys",
"dask/tests/test_delayed.py::test_lists_are_concrete",
"dask/tests/test_delayed.py::test_iterators",
"dask/tests/test_delayed.py::test_traverse_false",
"dask/tests/test_delayed.py::test_pure",
"dask/tests/test_delayed.py::test_pure_global_setting",
"dask/tests/test_delayed.py::test_nout",
"dask/tests/test_delayed.py::test_kwargs",
"dask/tests/test_delayed.py::test_array_delayed",
"dask/tests/test_delayed.py::test_array_bag_delayed",
"dask/tests/test_delayed.py::test_delayed_picklable",
"dask/tests/test_delayed.py::test_delayed_compute_forward_kwargs",
"dask/tests/test_delayed.py::test_delayed_method_descriptor",
"dask/tests/test_delayed.py::test_delayed_callable",
"dask/tests/test_delayed.py::test_delayed_name_on_call",
"dask/tests/test_delayed.py::test_callable_obj",
"dask/tests/test_delayed.py::test_name_consistent_across_instances",
"dask/tests/test_delayed.py::test_sensitive_to_partials",
"dask/tests/test_delayed.py::test_delayed_name",
"dask/tests/test_delayed.py::test_finalize_name"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,382 | [
"dask/delayed.py",
"dask/bag/core.py",
"dask/dataframe/io/io.py"
]
| [
"dask/delayed.py",
"dask/bag/core.py",
"dask/dataframe/io/io.py"
]
|
|
rorodata__firefly-15
| 6fb71b3970af79aad79f4793e0d9e3304420dd14
| 2017-06-16 17:46:48
| 6fb71b3970af79aad79f4793e0d9e3304420dd14
| anandology: The argument values are not constants, so they should be in lowercase.
On 3:33PM, Sat, Jun 17, 2017 Nabarun Pal <[email protected]> wrote:
> *@palnabarun* commented on this pull request.
> ------------------------------
>
> In firefly/main.py
> <https://github.com/rorodata/firefly/pull/15#discussion_r122564373>:
>
> >
> def main():
> # ensure current directory is added to sys.path
> if "" not in sys.path:
> sys.path.insert(0, "")
>
> args = parse_args()
> - functions = load_functions(args.functions)
> + print(len(args.functions), args.CONFIG_FILE)
> + if (len(args.functions) > 0 and args.CONFIG_FILE) or (len(args.functions) == 0 and not args.CONFIG_FILE):
> + raise FireflyError("Invalid arguments provided. Please specify either a config file or a list of functions.")
>
> I thought about that. But the current one seemed more verbose to me. Both
> are okay with me.
>
> —
> You are receiving this because you commented.
>
>
> Reply to this email directly, view it on GitHub
> <https://github.com/rorodata/firefly/pull/15#discussion_r122564373>, or mute
> the thread
> <https://github.com/notifications/unsubscribe-auth/AAAdkUVfpndTH0tzinN7_Zo2Z9XKCmLfks5sE5H0gaJpZM4N8vrJ>
> .
>
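The review thread above is about naming: `dest` fixes the attribute name on the parsed namespace, and since these are values rather than constants the reviewer wants them lowercase. A small argparse illustration (the file name is made up):
```
import argparse

p = argparse.ArgumentParser()
p.add_argument("-c", "--config", dest="config_file", default=None)

args = p.parse_args(["-c", "firefly.yml"])
print(args.config_file)   # attribute name follows dest, i.e. lowercase
```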
| diff --git a/firefly/main.py b/firefly/main.py
index dd4a4fb..8db6caf 100644
--- a/firefly/main.py
+++ b/firefly/main.py
@@ -1,30 +1,47 @@
+import os
import sys
import argparse
import importlib
+import yaml
from .app import Firefly
from .server import FireflyServer
+from .validator import ValidationError, FireflyError
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-b", "--bind", dest="ADDRESS", default="127.0.0.1:8000")
- p.add_argument("functions", nargs='+', help="functions to serve")
+ p.add_argument("-c", "--config", dest="config_file", default=None)
+ p.add_argument("functions", nargs='*', help="functions to serve")
return p.parse_args()
-def load_function(function_spec):
+def load_function(function_spec, path=None, name=None):
if "." not in function_spec:
raise Exception("Invalid function, please specify it as module.function")
mod_name, func_name = function_spec.rsplit(".", 1)
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
- return (func_name, func)
+ path = path or "/"+func_name
+ name = name or func_name
+ return (path, name, func)
def load_functions(function_specs):
return [load_function(function_spec) for function_spec in function_specs]
+def parse_config_file(config_file):
+ if not os.path.exists(config_file):
+ raise FireflyError("Specified config file does not exist.")
+ with open(config_file) as f:
+ config_dict = yaml.safe_load(f)
+ return config_dict
+
+def parse_config_data(config_dict):
+ return [(load_function(f["function"], path=f["path"], name=name, ))
+ for name, f in config_dict["functions"].items()]
+
def add_routes(app, functions):
- for name, function in functions:
- app.add_route('/'+name, function, name)
+ for path, name, function in functions:
+ app.add_route(path, function, name)
def main():
# ensure current directory is added to sys.path
@@ -32,7 +49,14 @@ def main():
sys.path.insert(0, "")
args = parse_args()
- functions = load_functions(args.functions)
+
+ if (args.functions and args.CONFIG_FILE) or (not args.functions and not args.CONFIG_FILE):
+ raise FireflyError("Invalid arguments provided. Please specify either a config file or a list of functions.")
+
+ if len(args.functions):
+ functions = load_functions(args.functions)
+ elif args.CONFIG_FILE:
+ functions = parse_config_data(parse_config_file(args.CONFIG_FILE))
app = Firefly()
add_routes(app, functions)
diff --git a/firefly/validator.py b/firefly/validator.py
index b1a2eaf..bb3a477 100644
--- a/firefly/validator.py
+++ b/firefly/validator.py
@@ -8,6 +8,9 @@ else:
class ValidationError(Exception):
pass
+class FireflyError(Exception):
+ pass
+
def validate_args(function, kwargs):
function_signature = signature(function)
try:
diff --git a/requirements.txt b/requirements.txt
index 8ec0719..28de9ef 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
gunicorn==19.7.1
WebOb==1.7.2
requests==2.18.1
+PyYAML==3.12
funcsigs==1.0.2 ; python_version < '3'
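The `parse_config_file`/`parse_config_data` pair above expects a YAML mapping with a top-level `functions` section whose entries carry a dotted `function` path and a URL `path`; a hedged sketch of that structure (the module name `myapp` is a placeholder):
```
import yaml

config_text = """
functions:
  square:
    function: myapp.square
    path: /square
"""

# Same access pattern as parse_config_data() above.
config = yaml.safe_load(config_text)
for name, f in config["functions"].items():
    print(name, f["function"], f["path"])   # square myapp.square /square
```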
| Make firefly support a config file
It should be possible to specify the specs in a YAML config file instead of passing all command-line arguments. Plan for more options for each function, like output-content-type, input-content-type, endpoint, etc. It would be better to add a version field to the config file so that we can handle old versions if we change the format in the future.
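For illustration only, here is a sketch of what such a config might look like and of how the `parse_config_data()` added in the patch above would consume it. Only the `functions` / `function` / `path` keys come from that patch; the `version` field is just what this issue suggests, and the names (`square`, `myfuncs.square`) are made up:

```python
# Hypothetical firefly config, parsed the same way parse_config_data() does above.
import yaml

config_yaml = """
version: 1.0          # suggested by the issue; not yet read by the code
functions:
  square:             # made-up function name
    function: myfuncs.square
    path: /square
"""

config = yaml.safe_load(config_yaml)
for name, f in config["functions"].items():
    print(name, f["function"], f["path"])   # -> square myfuncs.square /square
```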
It would be better to have a version field to the config file so that we can handle old versions if we change the format in future. | rorodata/firefly | diff --git a/tests/test_main.py b/tests/test_main.py
index 7ecfbf2..723def1 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -3,6 +3,7 @@ from firefly.main import load_function
def test_load_functions():
os.path.exists2 = os.path.exists
- name, func = load_function("os.path.exists2")
+ path, name, func = load_function("os.path.exists2")
+ assert path == "/exists2"
assert name == "exists2"
assert func == os.path.exists
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyyaml",
"pytest>=3.1.1"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
chardet==3.0.4
exceptiongroup==1.2.2
-e git+https://github.com/rorodata/firefly.git@6fb71b3970af79aad79f4793e0d9e3304420dd14#egg=Firefly
gunicorn==19.7.1
idna==2.5
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.18.1
tomli==2.2.1
urllib3==1.21.1
WebOb==1.7.2
| name: firefly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- chardet==3.0.4
- exceptiongroup==1.2.2
- gunicorn==19.7.1
- idna==2.5
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.18.1
- tomli==2.2.1
- urllib3==1.21.1
- webob==1.7.2
prefix: /opt/conda/envs/firefly
| [
"tests/test_main.py::test_load_functions"
]
| []
| []
| []
| Apache License 2.0 | 1,383 | [
"firefly/validator.py",
"requirements.txt",
"firefly/main.py"
]
| [
"firefly/validator.py",
"requirements.txt",
"firefly/main.py"
]
|
Azure__azure-cli-3754 | 0e10f6fee8807cda08afd61f60fe8e7d7508156e | 2017-06-16 20:24:24 | eb12ac454cbe1ddb59c86cdf2045e1912660e750 | codecov-io: # [Codecov](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=h1) Report
> Merging [#3754](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=desc) into [master](https://codecov.io/gh/Azure/azure-cli/commit/dff725f8bee017b61cf2d6dc5374f9315e52b644?src=pr&el=desc) will **increase** coverage by `<.01%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #3754 +/- ##
==========================================
+ Coverage 72.14% 72.15% +<.01%
==========================================
Files 421 421
Lines 26010 26018 +8
Branches 3943 3945 +2
==========================================
+ Hits 18765 18773 +8
- Misses 6026 6029 +3
+ Partials 1219 1216 -3
```
| [Impacted Files](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [...li-cloud/azure/cli/command\_modules/cloud/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktY2xvdWQvYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy9jbG91ZC9jdXN0b20ucHk=) | `18.18% <ø> (ø)` | :arrow_up: |
| [...i-cloud/azure/cli/command\_modules/cloud/\_params.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktY2xvdWQvYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy9jbG91ZC9fcGFyYW1zLnB5) | `93.54% <100%> (+0.21%)` | :arrow_up: |
| [...re-cli-vm/azure/cli/command\_modules/vm/\_actions.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktdm0vYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy92bS9fYWN0aW9ucy5weQ==) | `76.19% <100%> (+1.19%)` | :arrow_up: |
| [src/azure-cli-core/azure/cli/core/cloud.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2F6dXJlLWNsaS1jb3JlL2F6dXJlL2NsaS9jb3JlL2Nsb3VkLnB5) | `70.9% <100%> (+0.26%)` | :arrow_up: |
| [...zure-cli-vm/azure/cli/command\_modules/vm/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktdm0vYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy92bS9jdXN0b20ucHk=) | `77.01% <100%> (ø)` | :arrow_up: |
| [src/azure-cli-core/azure/cli/core/util.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2F6dXJlLWNsaS1jb3JlL2F6dXJlL2NsaS9jb3JlL3V0aWwucHk=) | `70.06% <0%> (ø)` | :arrow_up: |
| [...dback/azure/cli/command\_modules/feedback/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktZmVlZGJhY2svYXp1cmUvY2xpL2NvbW1hbmRfbW9kdWxlcy9mZWVkYmFjay9jdXN0b20ucHk=) | `31.25% <0%> (ø)` | :arrow_up: |
| [...nent/azure/cli/command\_modules/component/custom.py](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=tree#diff-c3JjL2NvbW1hbmRfbW9kdWxlcy9henVyZS1jbGktY29tcG9uZW50L2F6dXJlL2NsaS9jb21tYW5kX21vZHVsZXMvY29tcG9uZW50L2N1c3RvbS5weQ==) | `16.23% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=footer). Last update [dff725f...cf59716](https://codecov.io/gh/Azure/azure-cli/pull/3754?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/src/azure-cli-core/azure/cli/core/cloud.py b/src/azure-cli-core/azure/cli/core/cloud.py
index 5fb720abd..21eefe782 100644
--- a/src/azure-cli-core/azure/cli/core/cloud.py
+++ b/src/azure-cli-core/azure/cli/core/cloud.py
@@ -58,7 +58,8 @@ class CloudEndpoints(object): # pylint: disable=too-few-public-methods,too-many
gallery=None,
active_directory=None,
active_directory_resource_id=None,
- active_directory_graph_resource_id=None):
+ active_directory_graph_resource_id=None,
+ vm_image_alias_doc=None):
# Attribute names are significant. They are used when storing/retrieving clouds from config
self.management = management
self.resource_manager = resource_manager
@@ -68,6 +69,7 @@ class CloudEndpoints(object): # pylint: disable=too-few-public-methods,too-many
self.active_directory = active_directory
self.active_directory_resource_id = active_directory_resource_id
self.active_directory_graph_resource_id = active_directory_graph_resource_id
+ self.vm_image_alias_doc = vm_image_alias_doc
def has_endpoint_set(self, endpoint_name):
try:
@@ -146,7 +148,8 @@ AZURE_PUBLIC_CLOUD = Cloud(
gallery='https://gallery.azure.com/',
active_directory='https://login.microsoftonline.com',
active_directory_resource_id='https://management.core.windows.net/',
- active_directory_graph_resource_id='https://graph.windows.net/'),
+ active_directory_graph_resource_id='https://graph.windows.net/',
+ vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json'), # pylint: disable=line-too-long
suffixes=CloudSuffixes(
storage_endpoint='core.windows.net',
keyvault_dns='.vault.azure.net',
@@ -164,7 +167,8 @@ AZURE_CHINA_CLOUD = Cloud(
gallery='https://gallery.chinacloudapi.cn/',
active_directory='https://login.chinacloudapi.cn',
active_directory_resource_id='https://management.core.chinacloudapi.cn/',
- active_directory_graph_resource_id='https://graph.chinacloudapi.cn/'),
+ active_directory_graph_resource_id='https://graph.chinacloudapi.cn/',
+ vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json'), # pylint: disable=line-too-long
suffixes=CloudSuffixes(
storage_endpoint='core.chinacloudapi.cn',
keyvault_dns='.vault.azure.cn',
@@ -180,7 +184,8 @@ AZURE_US_GOV_CLOUD = Cloud(
gallery='https://gallery.usgovcloudapi.net/',
active_directory='https://login.microsoftonline.com',
active_directory_resource_id='https://management.core.usgovcloudapi.net/',
- active_directory_graph_resource_id='https://graph.windows.net/'),
+ active_directory_graph_resource_id='https://graph.windows.net/',
+ vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json'), # pylint: disable=line-too-long
suffixes=CloudSuffixes(
storage_endpoint='core.usgovcloudapi.net',
keyvault_dns='.vault.usgovcloudapi.net',
@@ -196,7 +201,8 @@ AZURE_GERMAN_CLOUD = Cloud(
gallery='https://gallery.cloudapi.de/',
active_directory='https://login.microsoftonline.de',
active_directory_resource_id='https://management.core.cloudapi.de/',
- active_directory_graph_resource_id='https://graph.cloudapi.de/'),
+ active_directory_graph_resource_id='https://graph.cloudapi.de/',
+ vm_image_alias_doc='https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json'), # pylint: disable=line-too-long
suffixes=CloudSuffixes(
storage_endpoint='core.cloudapi.de',
keyvault_dns='.vault.microsoftazure.de',
diff --git a/src/command_modules/azure-cli-cloud/HISTORY.rst b/src/command_modules/azure-cli-cloud/HISTORY.rst
index d432f8da5..cd010407e 100644
--- a/src/command_modules/azure-cli-cloud/HISTORY.rst
+++ b/src/command_modules/azure-cli-cloud/HISTORY.rst
@@ -2,10 +2,10 @@
Release History
===============
-
2.0.5 (unreleased)
++++++++++++++++++
* Provide an option for 'az cloud set' to select the profile while selecting current cloud
+* Expose 'endpoint_vm_image_alias_doc'
2.0.4 (2017-06-13)
++++++++++++++++++
diff --git a/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py b/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py
index ffd0d6f01..ff67ef353 100644
--- a/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py
+++ b/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py
@@ -60,6 +60,8 @@ register_cli_argument('cloud', 'endpoint_active_directory_resource_id',
help='The resource ID to obtain AD tokens for')
register_cli_argument('cloud', 'endpoint_active_directory_graph_resource_id',
help='The Active Directory resource ID')
+register_cli_argument('cloud', 'endpoint_vm_image_alias_doc',
+ help='The uri of the document which caches commonly used virtual machine images')
register_cli_argument('cloud', 'suffix_sql_server_hostname',
help='The dns suffix for sql servers')
register_cli_argument('cloud', 'suffix_storage_endpoint',
diff --git a/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py b/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py
index 33b366154..be4732918 100644
--- a/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py
+++ b/src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py
@@ -58,6 +58,7 @@ def register_cloud(cloud_name,
endpoint_active_directory=None,
endpoint_active_directory_resource_id=None,
endpoint_active_directory_graph_resource_id=None,
+ endpoint_vm_image_alias_doc=None,
suffix_sql_server_hostname=None,
suffix_storage_endpoint=None,
suffix_keyvault_dns=None,
@@ -81,6 +82,7 @@ def modify_cloud(cloud_name=None,
endpoint_active_directory=None,
endpoint_active_directory_resource_id=None,
endpoint_active_directory_graph_resource_id=None,
+ endpoint_vm_image_alias_doc=None,
suffix_sql_server_hostname=None,
suffix_storage_endpoint=None,
suffix_keyvault_dns=None,
diff --git a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py
index cf259d63b..623039c5b 100644
--- a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py
+++ b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py
@@ -72,8 +72,13 @@ def load_images_thru_services(publisher, offer, sku, location):
def load_images_from_aliases_doc(publisher=None, offer=None, sku=None):
- target_url = ('https://raw.githubusercontent.com/Azure/azure-rest-api-specs/'
- 'master/arm-compute/quickstart-templates/aliases.json')
+ from azure.cli.core.cloud import get_active_cloud, CloudEndpointNotSetException
+ cloud = get_active_cloud()
+ try:
+ target_url = cloud.endpoints.vm_image_alias_doc
+ except CloudEndpointNotSetException:
+ raise CLIError("'endpoint_vm_image_alias_doc' isn't configured. Please invoke 'az cloud update' to configure "
+ "it or use '--all' to retrieve images from server")
txt = urlopen(target_url).read()
dic = json.loads(txt.decode())
try:
diff --git a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
index 2833b3512..a3180d0c9 100644
--- a/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
+++ b/src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py
@@ -213,9 +213,9 @@ def list_vm_images(image_location=None, publisher_name=None, offer=None, sku=Non
"is supported.")
all_images = load_images_thru_services(publisher_name, offer, sku, image_location)
else:
+ all_images = load_images_from_aliases_doc(publisher_name, offer, sku)
logger.warning(
'You are viewing an offline list of images, use --all to retrieve an up-to-date list')
- all_images = load_images_from_aliases_doc(publisher_name, offer, sku)
for i in all_images:
i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['version']])
| AzureStack: image aliases are not applicable for Azure Stack
### Description
The CLI has a set of aliases for images, loaded from the following location:
https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json
These aliases assume that the referenced images are available in all clouds, and each alias maps to a definition like the one below:
"UbuntuLTS":{
"publisher":"Canonical",
"offer":"UbuntuServer",
"sku":"16.04-LTS",
"version":"latest"
}
In Azure Stack, the alias does not make much sense, as the admin may not have uploaded the same image; in Azure Stack the Ubuntu Server SKU could be 14.04.3 instead of 16.04-LTS.
Luckily, the image parameter can be specified with the whole URN, like below, and this is what is being used in Azure Stack:
Canonical:UbuntuServer:14.04.3-LTS:1.0.0
All the public documentation refers to the use of aliases, and the aliases most likely won't work against Azure Stack unless the admin chooses to upload images matching the Azure definitions.
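For a concrete, hedged picture of the mismatch: the alias-to-URN expansion below mirrors the `':'.join(...)` line in `custom.py` above, the alias entry is the one quoted in this issue, and the Azure Stack URN is the one given above; nothing else is taken from real code:

```python
# Expanding an aliases.json entry into the full publisher:offer:sku:version URN
# (same join as list_vm_images() in custom.py above).
entry = {"publisher": "Canonical", "offer": "UbuntuServer",
         "sku": "16.04-LTS", "version": "latest"}
urn = ':'.join([entry['publisher'], entry['offer'], entry['sku'], entry['version']])
print(urn)  # Canonical:UbuntuServer:16.04-LTS:latest

# On Azure Stack the admin may have uploaded a different image, so the explicit
# URN from this issue has to be passed instead of the 'UbuntuLTS' alias:
azure_stack_urn = 'Canonical:UbuntuServer:14.04.3-LTS:1.0.0'
```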
| Azure/azure-cli | diff --git a/src/command_modules/azure-cli-vm/tests/test_vm_image.py b/src/command_modules/azure-cli-vm/tests/test_vm_image.py
index f3b661c14..9c2eb1f0d 100644
--- a/src/command_modules/azure-cli-vm/tests/test_vm_image.py
+++ b/src/command_modules/azure-cli-vm/tests/test_vm_image.py
@@ -8,6 +8,8 @@ import unittest
import mock
import azure.cli.core.application as application
+from azure.cli.core.util import CLIError
+from azure.cli.core.cloud import CloudEndpointNotSetException
class TestVMImage(unittest.TestCase):
@@ -42,6 +44,17 @@ class TestVMImage(unittest.TestCase):
self.assertEqual(parts[2], ubuntu_image['sku'])
self.assertEqual(parts[3], ubuntu_image['version'])
+ @mock.patch('azure.cli.core.cloud.get_active_cloud', autospec=True)
+ def test_when_alias_doc_is_missing(self, mock_get_active_cloud):
+ from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
+ p = mock.PropertyMock(side_effect=CloudEndpointNotSetException)
+ mock_cloud = mock.MagicMock()
+ type(mock_cloud.endpoints).vm_image_alias_doc = p
+ mock_get_active_cloud.return_value = mock_cloud
+ # assert
+ with self.assertRaises(CLIError):
+ load_images_from_aliases_doc()
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 6
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "python scripts/dev_setup.py",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libssl-dev libffi-dev"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | adal==0.4.3
applicationinsights==0.10.0
argcomplete==1.8.0
astroid==2.11.7
attrs==22.2.0
autopep8==2.0.4
azure-batch==3.0.0
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli&subdirectory=src/azure-cli
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_acr&subdirectory=src/command_modules/azure-cli-acr
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_acs&subdirectory=src/command_modules/azure-cli-acs
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_appservice&subdirectory=src/command_modules/azure-cli-appservice
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_batch&subdirectory=src/command_modules/azure-cli-batch
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_billing&subdirectory=src/command_modules/azure-cli-billing
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_cdn&subdirectory=src/command_modules/azure-cli-cdn
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_cloud&subdirectory=src/command_modules/azure-cli-cloud
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_cognitiveservices&subdirectory=src/command_modules/azure-cli-cognitiveservices
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_component&subdirectory=src/command_modules/azure-cli-component
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_configure&subdirectory=src/command_modules/azure-cli-configure
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_consumption&subdirectory=src/command_modules/azure-cli-consumption
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_core&subdirectory=src/azure-cli-core
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_cosmosdb&subdirectory=src/command_modules/azure-cli-cosmosdb
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_dla&subdirectory=src/command_modules/azure-cli-dla
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_dls&subdirectory=src/command_modules/azure-cli-dls
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_feedback&subdirectory=src/command_modules/azure-cli-feedback
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_find&subdirectory=src/command_modules/azure-cli-find
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_interactive&subdirectory=src/command_modules/azure-cli-interactive
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_iot&subdirectory=src/command_modules/azure-cli-iot
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_keyvault&subdirectory=src/command_modules/azure-cli-keyvault
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_lab&subdirectory=src/command_modules/azure-cli-lab
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_monitor&subdirectory=src/command_modules/azure-cli-monitor
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_network&subdirectory=src/command_modules/azure-cli-network
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_nspkg&subdirectory=src/azure-cli-nspkg
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_profile&subdirectory=src/command_modules/azure-cli-profile
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_rdbms&subdirectory=src/command_modules/azure-cli-rdbms
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_redis&subdirectory=src/command_modules/azure-cli-redis
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_resource&subdirectory=src/command_modules/azure-cli-resource
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_role&subdirectory=src/command_modules/azure-cli-role
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_sf&subdirectory=src/command_modules/azure-cli-sf
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_sql&subdirectory=src/command_modules/azure-cli-sql
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_storage&subdirectory=src/command_modules/azure-cli-storage
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_taskhelp&subdirectory=src/command_modules/azure-cli-taskhelp
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_testsdk&subdirectory=src/azure-cli-testsdk
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_utility_automation&subdirectory=scripts
-e git+https://github.com/Azure/azure-cli.git@0e10f6fee8807cda08afd61f60fe8e7d7508156e#egg=azure_cli_vm&subdirectory=src/command_modules/azure-cli-vm
azure-common==1.1.28
azure-core==1.24.2
azure-datalake-store==0.0.9
azure-devtools==1.2.0
azure-graphrbac==0.30.0rc6
azure-keyvault==0.3.4
azure-mgmt-authorization==0.30.0rc6
azure-mgmt-batch==4.0.0
azure-mgmt-billing==0.1.0
azure-mgmt-cdn==0.30.2
azure-mgmt-cognitiveservices==1.0.0
azure-mgmt-compute==1.0.0rc1
azure-mgmt-consumption==0.1.0
azure-mgmt-containerregistry==0.2.1
azure-mgmt-datalake-analytics==0.1.4
azure-mgmt-datalake-nspkg==3.0.1
azure-mgmt-datalake-store==0.1.4
azure-mgmt-devtestlabs==2.0.0
azure-mgmt-dns==1.0.1
azure-mgmt-documentdb==0.1.3
azure-mgmt-iothub==0.2.2
azure-mgmt-keyvault==0.40.0
azure-mgmt-monitor==0.2.1
azure-mgmt-network==1.0.0rc3
azure-mgmt-nspkg==1.0.0
azure-mgmt-rdbms==0.1.0
azure-mgmt-redis==1.0.0
azure-mgmt-resource==1.1.0rc1
azure-mgmt-sql==0.5.1
azure-mgmt-storage==1.0.0rc1
azure-mgmt-trafficmanager==0.30.0
azure-mgmt-web==0.32.0
azure-monitor==0.3.0
azure-multiapi-storage==0.1.0
azure-nspkg==1.0.0
azure-servicefabric==5.6.130
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
colorama==0.3.7
ConfigArgParse==1.7
coverage==6.2
cryptography==40.0.2
docutils==0.18.1
flake8==5.0.4
futures==3.1.1
humanfriendly==2.4
idna==3.10
importlib-metadata==4.2.0
iniconfig==1.1.1
isodate==0.7.0
isort==5.10.1
jeepney==0.7.1
jmespath==0.10.0
keyring==23.4.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==5.2.0
msrest==0.4.29
msrestazure==0.4.34
multidict==5.2.0
nose==1.3.7
oauthlib==3.2.2
packaging==21.3
paramiko==2.0.2
pbr==6.1.1
pluggy==1.0.0
prompt-toolkit==3.0.36
py==1.11.0
pyasn1==0.5.1
pycodestyle==2.10.0
pycparser==2.21
pydocumentdb==2.3.5
pyflakes==2.5.0
Pygments==2.14.0
PyJWT==2.4.0
pylint==1.7.1
pyOpenSSL==16.2.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==3.11
readme-renderer==34.0
requests==2.9.1
requests-oauthlib==2.0.0
scp==0.15.0
SecretStorage==3.3.3
six==1.10.0
sshtunnel==0.4.0
tabulate==0.7.7
tomli==1.2.3
typed-ast==1.5.5
typing-extensions==4.1.1
urllib3==1.26.20
urllib3-secure-extra==0.1.0
vcrpy==1.10.3
vsts-cd-manager==1.0.2
wcwidth==0.2.13
webencodings==0.5.1
Whoosh==2.7.4
wrapt==1.16.0
xmltodict==0.14.2
yarl==1.7.2
zipp==3.6.0
| name: azure-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- adal==0.4.3
- applicationinsights==0.10.0
- argcomplete==1.8.0
- astroid==2.11.7
- attrs==22.2.0
- autopep8==2.0.4
- azure-batch==3.0.0
- azure-common==1.1.28
- azure-core==1.24.2
- azure-datalake-store==0.0.9
- azure-devtools==1.2.0
- azure-graphrbac==0.30.0rc6
- azure-keyvault==0.3.4
- azure-mgmt-authorization==0.30.0rc6
- azure-mgmt-batch==4.0.0
- azure-mgmt-billing==0.1.0
- azure-mgmt-cdn==0.30.2
- azure-mgmt-cognitiveservices==1.0.0
- azure-mgmt-compute==1.0.0rc1
- azure-mgmt-consumption==0.1.0
- azure-mgmt-containerregistry==0.2.1
- azure-mgmt-datalake-analytics==0.1.4
- azure-mgmt-datalake-nspkg==3.0.1
- azure-mgmt-datalake-store==0.1.4
- azure-mgmt-devtestlabs==2.0.0
- azure-mgmt-dns==1.0.1
- azure-mgmt-documentdb==0.1.3
- azure-mgmt-iothub==0.2.2
- azure-mgmt-keyvault==0.40.0
- azure-mgmt-monitor==0.2.1
- azure-mgmt-network==1.0.0rc3
- azure-mgmt-nspkg==1.0.0
- azure-mgmt-rdbms==0.1.0
- azure-mgmt-redis==1.0.0
- azure-mgmt-resource==1.1.0rc1
- azure-mgmt-sql==0.5.1
- azure-mgmt-storage==1.0.0rc1
- azure-mgmt-trafficmanager==0.30.0
- azure-mgmt-web==0.32.0
- azure-monitor==0.3.0
- azure-multiapi-storage==0.1.0
- azure-nspkg==1.0.0
- azure-servicefabric==5.6.130
- bleach==4.1.0
- cffi==1.15.1
- colorama==0.3.7
- configargparse==1.7
- coverage==6.2
- cryptography==40.0.2
- docutils==0.18.1
- flake8==5.0.4
- futures==3.1.1
- humanfriendly==2.4
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- isodate==0.7.0
- isort==5.10.1
- jeepney==0.7.1
- jmespath==0.10.0
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==5.2.0
- msrest==0.4.29
- msrestazure==0.4.34
- multidict==5.2.0
- nose==1.3.7
- oauthlib==3.2.2
- packaging==21.3
- paramiko==2.0.2
- pbr==6.1.1
- pip==9.0.1
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- py==1.11.0
- pyasn1==0.5.1
- pycodestyle==2.10.0
- pycparser==2.21
- pydocumentdb==2.3.5
- pyflakes==2.5.0
- pygments==2.14.0
- pyjwt==2.4.0
- pylint==1.7.1
- pyopenssl==16.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==3.11
- readme-renderer==34.0
- requests==2.9.1
- requests-oauthlib==2.0.0
- scp==0.15.0
- secretstorage==3.3.3
- setuptools==30.4.0
- six==1.10.0
- sshtunnel==0.4.0
- tabulate==0.7.7
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- urllib3-secure-extra==0.1.0
- vcrpy==1.10.3
- vsts-cd-manager==1.0.2
- wcwidth==0.2.13
- webencodings==0.5.1
- whoosh==2.7.4
- wrapt==1.16.0
- xmltodict==0.14.2
- yarl==1.7.2
- zipp==3.6.0
prefix: /opt/conda/envs/azure-cli
| [
"src/command_modules/azure-cli-vm/tests/test_vm_image.py::TestVMImage::test_when_alias_doc_is_missing"
]
| [
"src/command_modules/azure-cli-vm/tests/test_vm_image.py::TestVMImage::test_read_images_from_alias_doc"
]
| []
| []
| MIT License | 1,384 | [
"src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py",
"src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py",
"src/azure-cli-core/azure/cli/core/cloud.py",
"src/command_modules/azure-cli-cloud/HISTORY.rst"
]
| [
"src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/custom.py",
"src/command_modules/azure-cli-cloud/azure/cli/command_modules/cloud/_params.py",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/custom.py",
"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_actions.py",
"src/azure-cli-core/azure/cli/core/cloud.py",
"src/command_modules/azure-cli-cloud/HISTORY.rst"
]
|
prophile__jacquard-56 | d6bafbb05c7effddbbe7c84b3310e3c2cde31930 | 2017-06-16 22:58:44 | d6bafbb05c7effddbbe7c84b3310e3c2cde31930 | diff --git a/jacquard/cli.py b/jacquard/cli.py
index 263ae91..a2e102e 100644
--- a/jacquard/cli.py
+++ b/jacquard/cli.py
@@ -115,7 +115,7 @@ def main(args=sys.argv[1:], config=None):
options.func(config, options)
except CommandError as exc:
(message,) = exc.args
- sys.stderr.write("%s\n", message)
+ print(message, file=sys.stderr)
exit(1)
diff --git a/jacquard/experiments/commands.py b/jacquard/experiments/commands.py
index d93fee3..dcdf99d 100644
--- a/jacquard/experiments/commands.py
+++ b/jacquard/experiments/commands.py
@@ -91,6 +91,7 @@ class Conclude(BaseCommand):
experiment = Experiment.from_store(store, options.experiment)
current_experiments = store.get('active-experiments', [])
+ concluded_experiments = store.get('concluded-experiments', [])
if options.experiment not in current_experiments:
raise CommandError(
@@ -98,6 +99,7 @@ class Conclude(BaseCommand):
)
current_experiments.remove(options.experiment)
+ concluded_experiments.append(options.experiment)
close(
store,
@@ -118,6 +120,7 @@ class Conclude(BaseCommand):
experiment.save(store)
store['active-experiments'] = current_experiments
+ store['concluded-experiments'] = concluded_experiments
class Load(BaseCommand):
@@ -143,7 +146,7 @@ class Load(BaseCommand):
parser.add_argument(
'--skip-launched',
action='store_true',
- help="do not error on launched experiments",
+ help="do not load or error on launched experiments",
)
@retrying
@@ -151,6 +154,7 @@ class Load(BaseCommand):
"""Run command."""
with config.storage.transaction() as store:
live_experiments = store.get('active-experiments', ())
+ concluded_experiments = store.get('concluded-experiments', ())
for file in options.files:
definition = yaml.safe_load(file)
@@ -167,6 +171,16 @@ class Load(BaseCommand):
experiment.id,
)
+ elif experiment.id in concluded_experiments:
+ if options.skip_launched:
+ continue
+
+ else:
+ raise CommandError(
+ "Experiment %r has concluded, refusing to edit" %
+ experiment.id,
+ )
+
experiment.save(store)
| `jacquard load-experiment` erases `concluded` dates | prophile/jacquard | diff --git a/jacquard/experiments/tests/test_smoke.py b/jacquard/experiments/tests/test_smoke.py
index 2b10c85..4839686 100644
--- a/jacquard/experiments/tests/test_smoke.py
+++ b/jacquard/experiments/tests/test_smoke.py
@@ -1,6 +1,10 @@
+import io
+import copy
import datetime
-from unittest.mock import Mock
+import contextlib
+from unittest.mock import Mock, patch
+import pytest
import dateutil.tz
from jacquard.cli import main
@@ -46,6 +50,7 @@ def test_conclude_no_branch():
assert 'concluded' in config.storage['experiments/foo']
assert 'foo' not in config.storage['active-experiments']
+ assert 'foo' in config.storage['concluded-experiments']
assert not config.storage['defaults']
@@ -57,4 +62,107 @@ def test_conclude_updates_defaults():
assert 'concluded' in config.storage['experiments/foo']
assert 'foo' not in config.storage['active-experiments']
+ assert 'foo' in config.storage['concluded-experiments']
assert config.storage['defaults'] == BRANCH_SETTINGS
+
+
+def test_load_after_launch_errors():
+ config = Mock()
+ config.storage = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+
+ experiment_data = {'id': 'foo'}
+ experiment_data.update(DUMMY_DATA_PRE_LAUNCH['experiments/foo'])
+
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr), pytest.raises(SystemExit):
+ with patch(
+ 'jacquard.experiments.commands.yaml.safe_load',
+ return_value=experiment_data,
+ ), patch(
+ 'jacquard.experiments.commands.argparse.FileType',
+ return_value=str,
+ ):
+ main(('load-experiment', 'foo.yaml'), config=config)
+
+ stderr_content = stderr.getvalue()
+ assert "Experiment 'foo' is live, refusing to edit" in stderr_content
+
+ fresh_data = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+ assert fresh_data.data == config.storage.data, "Data should be unchanged"
+
+
+def test_load_after_launch_with_skip_launched():
+ config = Mock()
+ config.storage = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+
+ experiment_data = {'id': 'foo'}
+ experiment_data.update(DUMMY_DATA_PRE_LAUNCH['experiments/foo'])
+
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr), patch(
+ 'jacquard.experiments.commands.yaml.safe_load',
+ return_value=experiment_data,
+ ), patch(
+ 'jacquard.experiments.commands.argparse.FileType',
+ return_value=str,
+ ):
+ main(('load-experiment', '--skip-launched', 'foo.yaml'), config=config)
+
+ fresh_data = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+ assert fresh_data.data == config.storage.data, "Data should be unchanged"
+
+ stderr_content = stderr.getvalue()
+ assert '' == stderr_content
+
+
+def test_load_after_conclude_errors():
+ config = Mock()
+ config.storage = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+
+ main(('conclude', 'foo', 'bar'), config=config)
+ original_data = copy.deepcopy(config.storage.data)
+
+ experiment_data = {'id': 'foo'}
+ experiment_data.update(DUMMY_DATA_PRE_LAUNCH['experiments/foo'])
+
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr), pytest.raises(SystemExit):
+ with patch(
+ 'jacquard.experiments.commands.yaml.safe_load',
+ return_value=experiment_data,
+ ), patch(
+ 'jacquard.experiments.commands.argparse.FileType',
+ return_value=str,
+ ):
+ main(('load-experiment', 'foo.yaml'), config=config)
+
+ assert original_data == config.storage.data, "Data should be unchanged"
+
+ stderr_content = stderr.getvalue()
+ assert "Experiment 'foo' has concluded, refusing to edit" in stderr_content
+
+
+def test_load_after_conclude_with_skip_launched():
+ config = Mock()
+ config.storage = DummyStore('', data=DUMMY_DATA_POST_LAUNCH)
+
+ main(('conclude', 'foo', 'bar'), config=config)
+ original_data = copy.deepcopy(config.storage.data)
+
+ experiment_data = {'id': 'foo'}
+ experiment_data.update(DUMMY_DATA_PRE_LAUNCH['experiments/foo'])
+
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr), patch(
+ 'jacquard.experiments.commands.yaml.safe_load',
+ return_value=experiment_data,
+ ), patch(
+ 'jacquard.experiments.commands.argparse.FileType',
+ return_value=str,
+ ):
+ main(('load-experiment', '--skip-launched', 'foo.yaml'), config=config)
+
+ assert original_data == config.storage.data, "Data should be unchanged"
+
+ stderr_content = stderr.getvalue()
+ assert '' == stderr_content
diff --git a/jacquard/tests/test_cli.py b/jacquard/tests/test_cli.py
index bae650f..bf32262 100644
--- a/jacquard/tests/test_cli.py
+++ b/jacquard/tests/test_cli.py
@@ -3,7 +3,9 @@ import textwrap
import contextlib
import unittest.mock
-from jacquard.cli import main
+import pytest
+
+from jacquard.cli import CommandError, main
from jacquard.storage.dummy import DummyStore
@@ -59,3 +61,28 @@ def test_run_write_command():
assert output.getvalue() == ''
assert config.storage.data == {'defaults': '{"foo": "bar"}'}
+
+
+def test_erroring_command():
+ config = unittest.mock.Mock()
+
+ ERROR_MESSAGE = "MOCK ERROR: Something went wrong in the"
+
+ mock_parser = unittest.mock.Mock()
+ mock_options = unittest.mock.Mock()
+ mock_options.func = unittest.mock.Mock(
+ side_effect=CommandError(ERROR_MESSAGE),
+ )
+ mock_parser.parse_args = unittest.mock.Mock(
+ return_value=mock_options,
+ )
+
+ stderr = io.StringIO()
+ with contextlib.redirect_stderr(stderr), pytest.raises(SystemExit):
+ with unittest.mock.patch(
+ 'jacquard.cli.argument_parser',
+ return_value=mock_parser,
+ ):
+ main(['command'], config=config)
+
+ assert stderr.getvalue() == ERROR_MESSAGE + "\n"
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"fakeredis",
"hypothesis",
"flake8",
"flake8-docstrings",
"flake8-isort",
"flake8-mutable",
"flake8-debugger",
"flake8-comprehensions",
"flake8-todo"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==4.0.2
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
dataclasses==0.8
Deprecated==1.2.18
fakeredis==1.7.4
flake8==5.0.4
flake8-comprehensions==2.3.0
flake8-debugger==4.0.0
flake8-docstrings==1.6.0
flake8-isort==4.2.0
flake8-mutable==1.2.0
flake8-todo==0.7
greenlet==2.0.2
hypothesis==6.31.6
importlib-metadata==4.2.0
iniconfig==1.1.1
isort==5.10.1
-e git+https://github.com/prophile/jacquard.git@d6bafbb05c7effddbbe7c84b3310e3c2cde31930#egg=jacquard_split
mccabe==0.7.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pydocstyle==6.3.0
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.1
redis==4.2.2
six==1.17.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
SQLAlchemy==1.4.54
tomli==1.2.3
typing_extensions==4.1.1
Werkzeug==2.0.3
wrapt==1.16.0
zipp==3.6.0
| name: jacquard
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==4.0.2
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- dataclasses==0.8
- deprecated==1.2.18
- fakeredis==1.7.4
- flake8==5.0.4
- flake8-comprehensions==2.3.0
- flake8-debugger==4.0.0
- flake8-docstrings==1.6.0
- flake8-isort==4.2.0
- flake8-mutable==1.2.0
- flake8-todo==0.7
- greenlet==2.0.2
- hypothesis==6.31.6
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- isort==5.10.1
- mccabe==0.7.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pydocstyle==6.3.0
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.1
- redis==4.2.2
- six==1.17.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sqlalchemy==1.4.54
- tomli==1.2.3
- typing-extensions==4.1.1
- werkzeug==2.0.3
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/jacquard
| [
"jacquard/tests/test_cli.py::test_erroring_command"
]
| [
"jacquard/experiments/tests/test_smoke.py::test_launch",
"jacquard/experiments/tests/test_smoke.py::test_conclude_no_branch",
"jacquard/experiments/tests/test_smoke.py::test_conclude_updates_defaults",
"jacquard/experiments/tests/test_smoke.py::test_load_after_launch_errors",
"jacquard/experiments/tests/test_smoke.py::test_load_after_launch_with_skip_launched",
"jacquard/experiments/tests/test_smoke.py::test_load_after_conclude_errors",
"jacquard/experiments/tests/test_smoke.py::test_load_after_conclude_with_skip_launched",
"jacquard/tests/test_cli.py::test_smoke_cli_help",
"jacquard/tests/test_cli.py::test_help_message_when_given_no_subcommand",
"jacquard/tests/test_cli.py::test_run_basic_command",
"jacquard/tests/test_cli.py::test_run_write_command"
]
| []
| []
| MIT License | 1,385 | [
"jacquard/experiments/commands.py",
"jacquard/cli.py"
]
| [
"jacquard/experiments/commands.py",
"jacquard/cli.py"
]
|
|
bokeh__bokeh-6488 | 74cf9e9e173fe3dbff132bf94257310869f9614f | 2017-06-18 02:35:12 | 44b63d65efec1e06fb565a9a81e0f2f21315e85a | diff --git a/bokeh/util/serialization.py b/bokeh/util/serialization.py
index 96c406a88..dd6d27e78 100644
--- a/bokeh/util/serialization.py
+++ b/bokeh/util/serialization.py
@@ -101,7 +101,8 @@ def convert_datetime_type(obj):
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
- return (obj - DT_EPOCH).total_seconds() * 1000. + obj.microsecond / 1000.
+ diff = obj.replace(tzinfo=None) - DT_EPOCH
+ return diff.total_seconds() * 1000. + obj.microsecond / 1000.
# Timedelta (timedelta is class in the datetime library)
elif isinstance(obj, dt.timedelta):
| bokeh.util.serialization.py convert_datetime_type breaks when passed a timezone-aware datetime
After updating to bokeh 0.12.6, convert_datetime_type became unable to handle datetime objects that are timezone aware. The subtraction of DT_EPOCH (a naive datetime) from obj, which may be aware or naive, fails when obj is timezone aware. The error indicates that subtracting a naive datetime from an aware one is not supported.
This occurs with Python 2.7.
A proposed fix to serialization.py is attached.
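A minimal sketch of the failure and of the fix in the patch above. It uses Python 3's `datetime.timezone` purely for brevity; the report is against Python 2.7, where a `pytz`-aware datetime fails the same way:

```python
import datetime as dt

DT_EPOCH = dt.datetime(1970, 1, 1)                        # naive, as in bokeh.util.serialization
aware = dt.datetime(2016, 5, 11, tzinfo=dt.timezone.utc)  # timezone-aware input

# aware - DT_EPOCH
# -> TypeError: can't subtract offset-naive and offset-aware datetimes

# The patch strips the tzinfo before subtracting:
diff = aware.replace(tzinfo=None) - DT_EPOCH
print(diff.total_seconds() * 1000. + aware.microsecond / 1000.)  # 1462924800000.0, as asserted by the new test
```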
$ pip freeze
appdirs==1.4.3
awsebcli==3.7.7
backports-abc==0.4
backports.ssl-match-hostname==3.5.0.1
bkcharts==0.2
blessed==1.9.5
bokeh==0.12.6
botocore==1.4.40
cement==2.8.2
certifi==2016.2.28
colorama==0.3.7
cycler==0.10.0
DateTime==4.1.1
Django==1.9.8
docker-py==1.7.2
dockerpty==0.4.1
docopt==0.6.2
docutils==0.12
futures==3.0.5
Jinja2==2.8
jmespath==0.9.0
MarkupSafe==0.23
msgpack-python==0.4.8
numpy==1.11.1
packaging==16.8
pandas==0.18.1
pathspec==0.3.4
pyasn1==0.1.9
pygobject==3.18.2
pyparsing==2.1.5
python-dateutil==2.5.3
pytz==2016.6.1
PyYAML==3.11
requests==2.9.1
rsa==3.4.2
s3transfer==0.0.1
semantic-version==2.5.0
singledispatch==3.4.0.3
six==1.10.0
tabulate==0.7.5
td-client==0.5.0
termcolor==1.1.0
texttable==0.8.4
tornado==4.4.1
tzlocal==1.2.2
urllib3==1.17
virtualenv==15.0.2
wcwidth==0.1.7
websocket-client==0.37.0
zope.interface==4.2.0
[serialization.py.txt](https://github.com/bokeh/bokeh/files/1078483/serialization.py.txt)
| bokeh/bokeh | diff --git a/bokeh/util/tests/test_serialization.py b/bokeh/util/tests/test_serialization.py
index c664a5c9a..501f2f679 100644
--- a/bokeh/util/tests/test_serialization.py
+++ b/bokeh/util/tests/test_serialization.py
@@ -6,6 +6,7 @@ import base64
import pytest
import numpy as np
import pandas as pd
+import pytz
import bokeh.util.serialization as bus
@@ -60,6 +61,12 @@ def test_convert_datetime_type():
assert bus.convert_datetime_type(pd.Timedelta("3000ms")) == 3000.0
assert bus.convert_datetime_type(bus._pd_timestamp(3000000)) == 3.0
+def test_convert_datetime_type_with_tz():
+ # This ensures datetimes are sent to BokehJS timezone-naive
+ # see https://github.com/bokeh/bokeh/issues/6480
+ for tz in pytz.all_timezones:
+ assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11, tzinfo=datetime.tzinfo(tz))) == 1462924800000.0
+
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install bokeh",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bokeh==3.4.3
contourpy==1.3.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pillow==11.1.0
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
tzdata==2025.2
xyzservices==2025.1.0
| name: bokeh
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bokeh==3.4.3
- contourpy==1.3.0
- jinja2==3.1.6
- markupsafe==3.0.2
- numpy==2.0.2
- pandas==2.2.3
- pillow==11.1.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- six==1.17.0
- tornado==6.4.2
- tzdata==2025.2
- xyzservices==2025.1.0
prefix: /opt/conda/envs/bokeh
| [
"bokeh/util/tests/test_serialization.py::test_convert_datetime_type_with_tz"
]
| [
"bokeh/util/tests/test_serialization.py::test_array_encoding_disabled_by_dtype"
]
| [
"bokeh/util/tests/test_serialization.py::test_id",
"bokeh/util/tests/test_serialization.py::test_id_with_simple_ids",
"bokeh/util/tests/test_serialization.py::test_np_consts",
"bokeh/util/tests/test_serialization.py::test_binary_array_types",
"bokeh/util/tests/test_serialization.py::test_datetime_types",
"bokeh/util/tests/test_serialization.py::test_is_datetime_type",
"bokeh/util/tests/test_serialization.py::test_convert_datetime_type",
"bokeh/util/tests/test_serialization.py::test_traverse_return_valid_json",
"bokeh/util/tests/test_serialization.py::test_traverse_with_numpy",
"bokeh/util/tests/test_serialization.py::test_traverse_without_numpy",
"bokeh/util/tests/test_serialization.py::test_transform_array_force_list_default",
"bokeh/util/tests/test_serialization.py::test_transform_array_force_list_true",
"bokeh/util/tests/test_serialization.py::test_transform_series_force_list_default",
"bokeh/util/tests/test_serialization.py::test_transform_series_force_list_true",
"bokeh/util/tests/test_serialization.py::test_transform_array_to_list",
"bokeh/util/tests/test_serialization.py::test_transform_array_with_nans_to_list[values0]",
"bokeh/util/tests/test_serialization.py::test_transform_array_with_nans_to_list[values1]",
"bokeh/util/tests/test_serialization.py::test_encode_base64_dict",
"bokeh/util/tests/test_serialization.py::test_decode_base64_dict",
"bokeh/util/tests/test_serialization.py::test_encode_decode_roundtrip"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,386 | [
"bokeh/util/serialization.py"
]
| [
"bokeh/util/serialization.py"
]
|
|
neogeny__TatSu-32 | 03c5c4fdbe0ad1a649a1c6a04913efc234122971 | 2017-06-18 12:28:46 | 4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7 | diff --git a/tatsu/contexts.py b/tatsu/contexts.py
index 6c79fcf..c5dfa78 100644
--- a/tatsu/contexts.py
+++ b/tatsu/contexts.py
@@ -14,6 +14,7 @@ from ._unicode_characters import (
)
from tatsu.util import notnone, ustr, prune_dict, is_list, info, safe_name
from tatsu.util import left_assoc, right_assoc
+from tatsu.util import debug # noqa
from tatsu import buffering
from tatsu import color
from tatsu.infos import (
@@ -461,6 +462,10 @@ class ParseContext(object):
self._memos[key] = memo
return memo
+ def _forget(self, key):
+ self._memos.pop(key, None)
+ self._results.pop(key, None)
+
def _memo_for(self, key):
memo = self._memos.get(key)
@@ -479,13 +484,19 @@ class ParseContext(object):
self._results[key] = self._mkresult(node)
def _is_recursive(self, name):
- return name in self._recursive_rules
+ return self.left_recursion and name in self._recursive_rules
def _set_recursive(self, name):
- self._recursive_rules.add(name)
+ if self.left_recursion:
+ # add rules that are mutually recursive
+ i = self._rule_stack.index(name)
+ for rule in reversed(self._rule_stack[i:]):
+ if rule not in self._recursive_rules:
+ print('left', rule)
+ self._recursive_rules.add(rule)
def _unset_recursive(self, name):
- self._recursive_rules.remove(name)
+ self._recursive_rules -= {name}
def _set_left_recursion_guard(self, key):
ex = self._make_exception(key.name, exclass=FailedLeftRecursion)
@@ -520,37 +531,49 @@ class ParseContext(object):
finally:
self._rule_stack.pop()
+ def _clear_recursion_errors(self):
+ def filter(key, value):
+ return isinstance(value, FailedLeftRecursion)
+
+ prune_dict(self._memos, filter)
+
def _recursive_call(self, ruleinfo):
+ pos = self._pos
key = self.memokey
- memo = self._memo_for(key)
-
- if isinstance(memo, Exception):
- raise memo
- elif memo:
- return memo
- self._set_left_recursion_guard(key)
- result = self._invoke_rule(ruleinfo)
+ if key in self._results:
+ return self._results[key]
+ result = self._invoke_rule(ruleinfo, pos, key)
- if not self.left_recursion:
- return result
if not self._is_recursive(ruleinfo.name):
return result
- self._next_token(ruleinfo)
- key = self.memokey
- self._save_result(key, result.node)
- try:
- result = self._recursive_call(ruleinfo)
- except FailedParse:
- pass
+ while True:
+ self._forget(key)
+ self._save_result(key, result.node)
+ self._clear_recursion_errors()
+ self._goto(pos)
+ try:
+ new_result = self._invoke_rule(ruleinfo, pos, key)
+ except FailedParse:
+ break
+ if self._pos <= result.newpos:
+ break
+ result = new_result
+ del self._results[key]
+ self._forget(key)
return result
- def _invoke_rule(self, ruleinfo):
- key = self.memokey
+ def _invoke_rule(self, ruleinfo, pos, key):
+ memo = self._memo_for(key)
+ if isinstance(memo, Exception):
+ raise memo
+ elif memo:
+ return memo
+ self._set_left_recursion_guard(key)
+
self._push_ast()
- pos = self._pos
try:
try:
self._next_token(ruleinfo)
| Parser drops part of input
Grammar:
```
identifier = /\w+/ ;
start = '{' start '}' | start '->' identifier | identifier ;
```
Input:
```
{ size }
test
```
Expected output: `['{', 'size', '}']`
Actual output: `test`
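A hedged driver for reproducing this through the public API (`tatsu.compile` / `model.parse`, the same calls used in the test suite below); the rules are the ones above, reordered only so that `start` is the default start rule:

```python
import tatsu

GRAMMAR = r'''
    start = '{' start '}' | start '->' identifier | identifier ;
    identifier = /\w+/ ;
'''

model = tatsu.compile(GRAMMAR)
ast = model.parse('{ size }\ntest')
print(ast)   # expected ['{', 'size', '}'], but 'test' comes back before the fix
```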
Removing the (left recursive) rule `start '->' identifier` fixes the problem. | neogeny/TatSu | diff --git a/test/grammar/left_recursion_test.py b/test/grammar/left_recursion_test.py
index 2c81e0a..0dced21 100644
--- a/test/grammar/left_recursion_test.py
+++ b/test/grammar/left_recursion_test.py
@@ -60,50 +60,85 @@ class LeftRecursionTests(unittest.TestCase):
print(ast)
self.assertEqual([['5', '-', '87'], '-', '32'], ast)
+ @unittest.skip('uncertain if grammar is correct')
def test_indirect_left_recursion_complex(self, trace=False):
grammar = '''
@@left_recursion :: True
- start = Primary $ ;
- Primary = PrimaryNoNewArray ;
-
- PrimaryNoNewArray =
- ClassInstanceCreationExpression
- | MethodInvocation
- | FieldAccess
- | ArrayAccess
- | 'this' ;
-
- ClassInstanceCreationExpression =
- 'new' ClassOrInterfaceType '(' ')'
- | Primary '.new' Identifier '()' ;
-
- MethodInvocation =
- Primary '.' MethodName '()'
- | MethodName '()' ;
-
- FieldAccess =
- Primary '.' Identifier
- | 'super.' Identifier ;
-
- ArrayAccess =
- Primary '[' Expression ']'
- | ExpressionName '[' Expression ']' ;
-
- ClassOrInterfaceType =
- ClassName
- | InterfaceTypeName ;
-
- ClassName = 'C' | 'D' ;
- InterfaceTypeName = 'I' | 'J' ;
- Identifier = 'x' | 'y' | ClassOrInterfaceType ;
+ start
+ =
+ Primary $
+ ;
+
+ Primary
+ =
+ PrimaryNoNewArray
+ ;
+
+ PrimaryNoNewArray
+ =
+ | ClassInstanceCreationExpression
+ | MethodInvocation
+ | FieldAccess
+ | ArrayAccess
+ | 'this'
+ ;
+
+ ClassInstanceCreationExpression
+ =
+ | 'new' ClassOrInterfaceType '(' ')'
+ | Primary '.new' Identifier '()'
+ ;
+
+ MethodInvocation
+ =
+ | MethodName '()'
+ | Primary '.' MethodName '()'
+ ;
+
+ FieldAccess
+ =
+ | Primary '.' Identifier
+ | 'super.' Identifier
+ ;
+
+ ArrayAccess
+ =
+ | Primary '[' Expression ']'
+ | ExpressionName '[' Expression ']'
+ ;
+
+ ClassOrInterfaceType
+ =
+ | ClassName
+ | InterfaceTypeName
+ ;
+
+ ClassName
+ =
+ 'C' | 'D'
+ ;
+
+ InterfaceTypeName
+ =
+ 'I' | 'J'
+ ;
+
+ Identifier
+ =
+ | 'x' | 'y'
+ | ClassOrInterfaceType
+ ;
+
MethodName = 'm' | 'n' ;
+
ExpressionName = Identifier ;
+
Expression = 'i' | 'j' ;
'''
model = compile(grammar, "test")
ast = model.parse("this", trace=trace, colorize=True)
self.assertEqual('this', ast)
- ast = model.parse("this.x", trace=trace, colorize=True)
+ ast = model.parse("this.x", trace=True, colorize=True)
self.assertEqual(['this', '.', 'x'], ast)
ast = model.parse("this.x.y", trace=trace, colorize=True)
self.assertEqual([['this', '.', 'x'], '.', 'y'], ast)
@@ -139,7 +174,7 @@ class LeftRecursionTests(unittest.TestCase):
model.parse("1*2+3*5", trace=trace, colorize=True)
try:
model.parse("1*2+3*5", left_recursion=False, trace=trace, colorize=True)
- self.Fail('expected left recursion failure')
+ self.fail('expected left recursion failure')
except FailedParse:
pass
@@ -170,7 +205,7 @@ class LeftRecursionTests(unittest.TestCase):
ast = model_b.parse("(((1+2)))", trace=trace, colorize=True)
self.assertEqual(['1', '+', '2'], ast)
- def test_left_recursion_bug(self, trace=True):
+ def test_left_recursion_bug(self, trace=False):
grammar = '''\
@@grammar :: Minus
@@left_recursion :: True
@@ -197,7 +232,7 @@ class LeftRecursionTests(unittest.TestCase):
'''
model = compile(grammar=grammar)
model.parse('3', trace=trace, colorize=True)
- model.parse('3 - 2', trace=trace, colorize=True)
+ model.parse('3 - 2', trace=True, colorize=True)
model.parse('(3 - 2)', trace=trace, colorize=True)
model.parse('(3 - 2) - 1', trace=trace, colorize=True)
model.parse('3 - 2 - 1', trace=trace, colorize=True)
@@ -215,3 +250,62 @@ class LeftRecursionTests(unittest.TestCase):
model = compile(grammar, "test")
ast = model.parse("1+2+3", trace=trace, colorize=True)
self.assertEqual(['1', '+', ['2', '+', '3']], ast)
+
+ def test_partial_input_bug(self, trace=False):
+ grammar = '''
+ start
+ =
+ expre
+ ;
+
+ expre
+ =
+ | '{' expre '}'
+ | expre '->' identifier
+ | identifier
+ ;
+
+ identifier
+ =
+ /\w+/
+ ;
+ '''
+
+ input = '''
+ { size } test
+ '''
+
+ model = compile(grammar)
+ ast = model.parse(input, trace=trace, colorize=True)
+ assert ['{', 'size', '}'] == ast
+
+ def test_dropped_input_bug(self, trace=False):
+ grammar = '''
+ @@left_recursion :: True
+
+ start
+ =
+ expr
+ ;
+
+ expr
+ =
+ | expr ',' expr
+ | identifier
+ ;
+
+ identifier
+ =
+ /\w+/
+ ;
+ '''
+ model = compile(grammar)
+
+ ast = model.parse('foo', trace=trace, colorize=True)
+ self.assertEqual('foo', ast)
+
+ ast = model.parse('foo bar', trace=trace, colorize=True)
+ self.assertEqual('foo', ast)
+
+ ast = model.parse('foo, bar', trace=trace, colorize=True)
+ self.assertEqual(['foo', ',', 'bar'], ast)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8",
"pytest-mypy",
"pytest-pylint"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
dill==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
iniconfig==2.1.0
isort==6.0.1
mccabe==0.7.0
mypy==1.15.0
mypy-extensions==1.0.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pylint==3.3.6
pytest==8.3.5
pytest-flake8==1.3.0
pytest-mypy==1.0.0
pytest-pylint==0.21.0
-e git+https://github.com/neogeny/TatSu.git@03c5c4fdbe0ad1a649a1c6a04913efc234122971#egg=TatSu
tomli==2.2.1
tomlkit==0.13.2
typing_extensions==4.13.0
| name: TatSu
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- dill==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pylint==3.3.6
- pytest==8.3.5
- pytest-flake8==1.3.0
- pytest-mypy==1.0.0
- pytest-pylint==0.21.0
- tomli==2.2.1
- tomlkit==0.13.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/TatSu
| [
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_dropped_input_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_partial_input_bug"
]
| []
| [
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_direct_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_with_cut",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_with_right_associativity",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_nested_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_no_left_recursion"
]
| []
| BSD License | 1,387 | [
"tatsu/contexts.py"
]
| [
"tatsu/contexts.py"
]
|
|
itamarst__crochet-109 | 20f0e95848760ba6859576d1713739c8734a539c | 2017-06-18 17:01:00 | 20f0e95848760ba6859576d1713739c8734a539c | diff --git a/.travis.yml b/.travis.yml
index c080557..3777fa6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,14 +1,16 @@
language: python
env:
- - TWISTED=Twisted==15.0 RUNTESTS=trial
- - TWISTED=Twisted RUNTESTS=trial
+ # Oldest supported version:
+ - TWISTED=Twisted==16.0
+ # Latest Twisted:
+ - TWISTED=Twisted
python:
- 2.7
- 3.4
- 3.5
- - 3.6-dev
+ - 3.6
- pypy
install:
@@ -17,7 +19,7 @@ install:
- python setup.py -q install
script:
- - $RUNTESTS crochet.tests
+ - trial crochet.tests
- pyflakes crochet
notifications:
diff --git a/crochet/_eventloop.py b/crochet/_eventloop.py
index d6f7baa..9980e99 100644
--- a/crochet/_eventloop.py
+++ b/crochet/_eventloop.py
@@ -298,7 +298,15 @@ class ThreadLogObserver(object):
"""
A log observer that writes to a queue.
"""
- self._logWritingReactor.callFromThread(self._observer, msg)
+ def log():
+ try:
+ self._observer(msg)
+ except:
+ # Lower-level logging system blew up, nothing we can do, so
+ # just drop on the floor.
+ pass
+
+ self._logWritingReactor.callFromThread(log)
class EventLoop(object):
diff --git a/docs/news.rst b/docs/news.rst
index 7d97d31..1ce83fc 100644
--- a/docs/news.rst
+++ b/docs/news.rst
@@ -1,6 +1,18 @@
What's New
==========
+1.7.0
+^^^^^
+
+Bug fixes:
+
+* If the Python `logging.Handler` throws an exception Crochet no longer goes into a death spiral.
+ Thanks to Michael Schlenker for the bug report.
+
+Removed features:
+
+* Versions of Twisted < 16.0 are no longer supported (i.e. no longer tested in CI.)
+
1.6.0
^^^^^
| MemoryErrors when logging handlers raise exceptions
The following script lets a Python 2.7.11 + Twisted 14.0 + crochet 1.4 (win32, 32-bit) process grow to 2 GB in size and become pretty much dysfunctional for anything (it does not always crash outright, only raising a MemoryError if you are lucky). It has the same effect with Twisted 17.1.
```python
import time
import crochet
import logging

class BrokenHandler(logging.Handler):
    def emit(self, record):
        raise ValueError("Kaboom")

log = logging.getLogger("twisted")
h = BrokenHandler(logging.DEBUG)
log.addHandler(h)
log.setLevel(logging.DEBUG)

crochet.setup()

while 1:
    time.sleep(10)
```
It should not hang like that, even if the logger misbehaves.
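The failure mode appears to be a feedback loop: the broken handler raises inside the observer that Crochet runs on its log-writing reactor, Twisted then reports that failure through the same logging path, and the backlog of queued log calls grows without bound. The fix in the diff above simply wraps the downstream observer so its exceptions are dropped. A minimal standalone sketch of that guard pattern (illustrative names only, not Crochet's actual API):

```python
def make_safe_observer(observer):
    """Wrap a log observer so a misbehaving handler cannot re-enter the log loop."""
    def safe_observer(msg):
        try:
            observer(msg)
        except Exception:
            # Nothing sensible to do: logging the error would go through the
            # same broken path, so drop the message on the floor.
            pass
    return safe_observer

def broken_observer(msg):
    raise ValueError("Kaboom")

# The wrapped observer swallows the error instead of propagating it:
make_safe_observer(broken_observer)({"message": "hello"})
```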
| itamarst/crochet | diff --git a/crochet/tests/test_logging.py b/crochet/tests/test_logging.py
new file mode 100644
index 0000000..9dc351b
--- /dev/null
+++ b/crochet/tests/test_logging.py
@@ -0,0 +1,86 @@
+"""Tests for the logging bridge."""
+
+from __future__ import absolute_import
+
+from twisted.trial.unittest import SynchronousTestCase
+import threading
+
+from twisted.python import threadable
+
+from .._eventloop import ThreadLogObserver
+
+
+class ThreadLogObserverTest(SynchronousTestCase):
+ """
+ Tests for ThreadLogObserver.
+
+ We use Twisted's SyncTestCase to ensure that unhandled logged errors get
+ reported as errors, in particular for test_error.
+ """
+ def test_stop(self):
+ """
+ ThreadLogObserver.stop() stops the thread started in __init__.
+ """
+ threadLog = ThreadLogObserver(None)
+ self.assertTrue(threadLog._thread.is_alive())
+ threadLog.stop()
+ threadLog._thread.join()
+ self.assertFalse(threadLog._thread.is_alive())
+
+ def test_emit(self):
+ """
+ ThreadLogObserver.emit runs the wrapped observer's in its thread, with
+ the given message.
+ """
+ messages = []
+ def observer(msg):
+ messages.append((threading.current_thread().ident, msg))
+
+ threadLog = ThreadLogObserver(observer)
+ ident = threadLog._thread.ident
+ msg1 = {}
+ msg2 = {"a": "b"}
+ threadLog(msg1)
+ threadLog(msg2)
+ threadLog.stop()
+ # Wait for writing to finish:
+ threadLog._thread.join()
+ self.assertEqual(messages, [(ident, msg1), (ident, msg2)])
+
+ def test_errors(self):
+ """
+ ThreadLogObserver.emit catches and silently drops exceptions from its
+ observer.
+ """
+ messages = []
+ counter = []
+ def observer(msg):
+ counter.append(1)
+ if len(counter) == 2:
+ raise RuntimeError("ono a bug")
+ messages.append(msg)
+
+ threadLog = ThreadLogObserver(observer)
+ msg1 = {"m": "1"}
+ msg2 = {"m": "2"}
+ msg3 = {"m": "3"}
+ threadLog(msg1)
+ threadLog(msg2)
+ threadLog(msg3)
+ threadLog.stop()
+ # Wait for writing to finish:
+ threadLog._thread.join()
+ self.assertEqual(messages, [msg1, msg3])
+
+ def test_ioThreadUnchanged(self):
+ """
+ ThreadLogObserver does not change the Twisted I/O thread (which is
+ supposed to match the thread the main reactor is running in.)
+ """
+ threadLog = ThreadLogObserver(None)
+ threadLog.stop()
+ threadLog._thread.join()
+ self.assertIn(threadable.ioThread,
+ # Either reactor was never run, or run in thread running
+ # the tests:
+ (None, threading.current_thread().ident))
diff --git a/crochet/tests/test_setup.py b/crochet/tests/test_setup.py
index f60f99e..5bc7850 100644
--- a/crochet/tests/test_setup.py
+++ b/crochet/tests/test_setup.py
@@ -14,7 +14,6 @@ import twisted
from twisted.python.log import PythonLoggingObserver
from twisted.python import log
from twisted.python.runtime import platform
-from twisted.python import threadable
from twisted.internet.task import Clock
from .._eventloop import EventLoop, ThreadLogObserver, _store
@@ -265,56 +264,6 @@ class ProcessSetupTests(TestCase):
self.assertFalse(reactor.getDelayedCalls())
-class ThreadLogObserverTest(TestCase):
- """
- Tests for ThreadLogObserver.
- """
- def test_stop(self):
- """
- ThreadLogObserver.stop() stops the thread started in __init__.
- """
- threadLog = ThreadLogObserver(None)
- self.assertTrue(threadLog._thread.is_alive())
- threadLog.stop()
- threadLog._thread.join()
- self.assertFalse(threadLog._thread.is_alive())
-
- def test_emit(self):
- """
- ThreadLogObserver.emit runs the wrapped observer's in its thread, with
- the given message.
- """
- messages = []
- def observer(msg):
- messages.append((threading.current_thread().ident, msg))
-
- threadLog = ThreadLogObserver(observer)
- ident = threadLog._thread.ident
- msg1 = {}
- msg2 = {"a": "b"}
- threadLog(msg1)
- threadLog(msg2)
- threadLog.stop()
- # Wait for writing to finish:
- threadLog._thread.join()
- self.assertEqual(messages, [(ident, msg1), (ident, msg2)])
-
-
- def test_ioThreadUnchanged(self):
- """
- ThreadLogObserver does not change the Twisted I/O thread (which is
- supposed to match the thread the main reactor is running in.)
- """
- threadLog = ThreadLogObserver(None)
- threadLog.stop()
- threadLog._thread.join()
- self.assertIn(threadable.ioThread,
- # Either reactor was never run, or run in thread running
- # the tests:
- (None, threading.current_thread().ident))
-
-
-
class ReactorImportTests(TestCase):
"""
Tests for when the reactor gets imported.
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Automat==22.10.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
constantly==15.1.0
coverage==6.2
-e git+https://github.com/itamarst/crochet.git@20f0e95848760ba6859576d1713739c8734a539c#egg=crochet
docutils==0.18.1
hyperlink==21.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
incremental==22.10.0
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
Twisted==22.4.0
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
zope.interface==5.5.2
| name: crochet
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- automat==22.10.0
- babel==2.11.0
- charset-normalizer==2.0.12
- constantly==15.1.0
- coverage==6.2
- docutils==0.18.1
- hyperlink==21.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- incremental==22.10.0
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- twisted==22.4.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/crochet
| [
"crochet/tests/test_logging.py::ThreadLogObserverTest::test_errors"
]
| []
| [
"crochet/tests/test_logging.py::ThreadLogObserverTest::test_emit",
"crochet/tests/test_logging.py::ThreadLogObserverTest::test_ioThreadUnchanged",
"crochet/tests/test_logging.py::ThreadLogObserverTest::test_stop",
"crochet/tests/test_setup.py::SetupTests::test_first_runs_reactor",
"crochet/tests/test_setup.py::SetupTests::test_logging",
"crochet/tests/test_setup.py::SetupTests::test_no_setup",
"crochet/tests/test_setup.py::SetupTests::test_no_setup_after_setup",
"crochet/tests/test_setup.py::SetupTests::test_no_setup_registry_shutdown",
"crochet/tests/test_setup.py::SetupTests::test_runs_with_lock",
"crochet/tests/test_setup.py::SetupTests::test_second_does_nothing",
"crochet/tests/test_setup.py::SetupTests::test_setup_registry_shutdown",
"crochet/tests/test_setup.py::SetupTests::test_start_watchdog_thread",
"crochet/tests/test_setup.py::SetupTests::test_stop_logging_on_exit",
"crochet/tests/test_setup.py::SetupTests::test_stop_on_exit",
"crochet/tests/test_setup.py::SetupTests::test_warnings_untouched",
"crochet/tests/test_setup.py::ProcessSetupTests::test_posix",
"crochet/tests/test_setup.py::ReactorImportTests::test_crochet_import_no_reactor",
"crochet/tests/test_setup.py::LoggingTests::test_new_logging"
]
| []
| MIT License | 1,388 | [
"crochet/_eventloop.py",
".travis.yml",
"docs/news.rst"
]
| [
"crochet/_eventloop.py",
".travis.yml",
"docs/news.rst"
]
|
|
fronzbot__blinkpy-37 | 2a4b0d3f4b5974c5b57a8723fa5d2d57f441408e | 2017-06-18 22:39:10 | 968b77b8bd97306fe763abe6a4a5760d91f4a211 | diff --git a/CHANGES.rst b/CHANGES.rst
index 3452982..be1cf91 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -6,6 +6,7 @@ A list of changes between each release
0.7.0.dev (development version)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- Fixed style errors for bumped pydocstring and pylint versions
+- Changed Blink.cameras dictionary to be case-insensitive (fixes `#35 <https://github.com/fronzbot/blinkpy/issues/35>`_)
0.6.0 (2017-05-12)
^^^^^^^^^^^^^^^^^^
diff --git a/README.rst b/README.rst
index 2e02f9b..d3eb6f5 100644
--- a/README.rst
+++ b/README.rst
@@ -1,7 +1,6 @@
blinkpy |Build Status| |Coverage Status|
=============================================
A Python library for the Blink Camera system
-Only compatible with Python 3+
Disclaimer:
~~~~~~~~~~~~~~~
@@ -51,8 +50,8 @@ The cameras are of a BlinkCamera class, of which the following parameters can be
blink = blinkpy.Blink(username='YOUR USER NAME', password='YOUR PASSWORD')
blink.setup_system()
- for name, camera in blink.cameras.items():
- print(name) # Name of the camera
+ for camera in blink.cameras:
+ print(camera.name) # Name of the camera
print(camera.id) # Integer id of the camera (assigned by Blink)
print(camera.armed) # Whether the device is armed/disarmed (ie. detecting motion)
print(camera.clip) # Link to last motion clip captured
@@ -62,7 +61,7 @@ The cameras are of a BlinkCamera class, of which the following parameters can be
print(camera.battery_string) # Gives battery level as a string ("OK" or "Low"). Returns "Unknown" if value is... well, unknown
print(camera.notifications) # Number of unread notifications (ie. motion alerts that haven't been viewed)
print(camera.motion) # Dictionary containing values for keys ['video', 'image', 'time']
- # which corresponds to last motion recorded, thumbnail of last motion, and timestamp of last motion
+ # which correspond to last motion recorded, thumbnail of last motion, and timestamp of last motion
Class Descriptions
diff --git a/blinkpy.py b/blinkpy.py
index 12cdd67..2ce09da 100644
--- a/blinkpy.py
+++ b/blinkpy.py
@@ -16,6 +16,7 @@ import json
import getpass
from shutil import copyfileobj
import requests
+from requests.structures import CaseInsensitiveDict
import helpers.errors as ERROR
from helpers.constants import (BLINK_URL, LOGIN_URL,
LOGIN_BACKUP_URL,
@@ -186,7 +187,7 @@ class Blink(object):
self.region_id = None
self._host = None
self._events = []
- self.cameras = {}
+ self.cameras = CaseInsensitiveDict({})
self._idlookup = {}
self.urls = None
diff --git a/helpers/__init__.py b/helpers/__init__.py
index 0a53750..e74901c 100644
--- a/helpers/__init__.py
+++ b/helpers/__init__.py
@@ -1,3 +1,3 @@
"""Init file for blinkpy helper functions."""
from helpers import constants
-from helpers import errors
\ No newline at end of file
+from helpers import errors
| Ignore case when accessing camera name
Currently the camera dictionary can be accessed by camera name, but the lookup is case-sensitive. We should make it case-insensitive so the cameras are less annoying to access. | fronzbot/blinkpy | diff --git a/tests/test_blink_functions.py b/tests/test_blink_functions.py
index cb2ba8b..be4c49f 100644
--- a/tests/test_blink_functions.py
+++ b/tests/test_blink_functions.py
@@ -2,6 +2,7 @@
import unittest
from unittest import mock
+import random
import blinkpy
import tests.mock_responses as mresp
@@ -120,6 +121,26 @@ class TestBlinkFunctions(unittest.TestCase):
camera.urls.home_url = "use_bad_response"
self.assertEqual(camera.image_refresh(), None)
+ @mock.patch('blinkpy.blinkpy.requests.post',
+ side_effect=mresp.mocked_requests_post)
+ @mock.patch('blinkpy.blinkpy.requests.get',
+ side_effect=mresp.mocked_requests_get)
+ def test_camera_random_case(self, mock_get, mock_post):
+ """Checks for case of camera name."""
+ self.blink.setup_system()
+ for camera_name in self.blink.cameras:
+
+ rand_name = camera_name
+ # Make sure we never pass this test if rand_name = camera_name
+ while rand_name == camera_name:
+ rand_name = ''.join(
+ random.choice(
+ (str.upper, str.lower)
+ )(x) for x in camera_name)
+
+ self.assertEqual(self.blink.cameras[camera_name].name,
+ self.blink.cameras[rand_name].name)
+
def test_camera_update(self):
"""Checks that the update function is doing the right thing."""
self.test_urls = blinkpy.BlinkURLHandler('test')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=2.9.2",
"pytest-cov>=2.3.1",
"pytest-timeout>=1.0.0",
"testtools"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/fronzbot/blinkpy.git@2a4b0d3f4b5974c5b57a8723fa5d2d57f441408e#egg=blinkpy
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-timeout==2.3.1
requests==2.32.3
testtools==2.7.2
tomli==2.2.1
urllib3==2.3.0
| name: blinkpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-timeout==2.3.1
- requests==2.32.3
- testtools==2.7.2
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/blinkpy
| [
"tests/test_blink_functions.py::TestBlinkFunctions::test_camera_random_case"
]
| []
| [
"tests/test_blink_functions.py::TestBlinkFunctions::test_camera_thumbs",
"tests/test_blink_functions.py::TestBlinkFunctions::test_camera_update",
"tests/test_blink_functions.py::TestBlinkFunctions::test_image_to_file",
"tests/test_blink_functions.py::TestBlinkFunctions::test_image_with_bad_data",
"tests/test_blink_functions.py::TestBlinkFunctions::test_last_motion",
"tests/test_blink_functions.py::TestBlinkFunctions::test_set_motion_detect",
"tests/test_blink_functions.py::TestBlinkFunctions::test_take_new_picture"
]
| []
| MIT License | 1,389 | [
"README.rst",
"blinkpy.py",
"helpers/__init__.py",
"CHANGES.rst"
]
| [
"README.rst",
"blinkpy.py",
"helpers/__init__.py",
"CHANGES.rst"
]
|
|
seperman__deepdiff-76 | d4918e17c555df1b46827f865c5c105097199f80 | 2017-06-19 19:34:25 | d4918e17c555df1b46827f865c5c105097199f80 | diff --git a/README.md b/README.md
index 8f5b62f..26ff60e 100644
--- a/README.md
+++ b/README.md
@@ -165,7 +165,7 @@ Let's say you have a huge nested object and want to see if any item with the wor
```py
from deepdiff import DeepSearch
obj = {"long": "somewhere", "string": 2, 0: 0, "somewhere": "around"}
-ds = DeepSearch(obj, item, verbose_level=2)
+ds = DeepSearch(obj, "somewhere", verbose_level=2)
print(ds)
```
@@ -187,7 +187,7 @@ Just grep through your objects as you would in shell!
```py
from deepdiff import grep
obj = {"long": "somewhere", "string": 2, 0: 0, "somewhere": "around"}
-ds = obj | grep(item)
+ds = obj | grep("somewhere")
print(ds)
```
diff --git a/deepdiff/__init__.py b/deepdiff/__init__.py
index 6e608d0..95ad688 100644
--- a/deepdiff/__init__.py
+++ b/deepdiff/__init__.py
@@ -1,3 +1,4 @@
+"""This module offers the DeepDiff, DeepSearch, grep and DeepHash classes."""
import logging
if __name__ == '__main__':
diff --git a/deepdiff/search.py b/deepdiff/search.py
index 26ba882..8c826d8 100644
--- a/deepdiff/search.py
+++ b/deepdiff/search.py
@@ -127,6 +127,11 @@ class DeepSearch(dict):
parents_ids=frozenset({}),
is_namedtuple=False):
"""Search objects"""
+ found = False
+ if obj == item:
+ found = True
+ self.__report(report_key='matched_values', key=parent, value=obj)
+
try:
if is_namedtuple:
obj = obj._asdict()
@@ -136,7 +141,9 @@ class DeepSearch(dict):
try:
obj = {i: getattr(obj, i) for i in obj.__slots__}
except AttributeError:
- self['unprocessed'].append("%s" % parent)
+ if not found:
+ self['unprocessed'].append("%s" % parent)
+
return
self.__search_dict(
| Feature: Search for types/objects
Currently when encountering an object, `DeepSearch` examines that object's `__dict__` without testing whether the object is what's being searched for. By example:
```
>>> from deepdiff import DeepSearch
>>> from uuid import uuid4
>>> foo = uuid4()
>>> DeepSearch({1: foo}, foo)
{}
```
This would also allow global singletons like `None` and functions to be searched for.
This seems like it'd be a fairly small change, just an `if item == obj` check at the top of `DeepSearch.__search_obj`. If you think this is a good idea, I'm happy to make a PR. | seperman/deepdiff | diff --git a/tests/test_search.py b/tests/test_search.py
index 49929c6..3f56ceb 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -15,6 +15,7 @@ To run a specific test, run this from the root of repo:
"""
import unittest
from deepdiff import DeepSearch, grep
+from datetime import datetime
import logging
logging.disable(logging.CRITICAL)
@@ -286,6 +287,30 @@ class DeepSearchTestCase(unittest.TestCase):
result = {'matched_values': {'root'}}
self.assertEqual(DeepSearch(obj, item, verbose_level=1, case_sensitive=False), result)
+ def test_none(self):
+ obj = item = None
+ result = {'matched_values': {'root'}}
+ self.assertEqual(DeepSearch(obj, item, verbose_level=1), result)
+
+ def test_complex_obj(self):
+ obj = datetime(2017, 5, 4, 1, 1, 1)
+ item = datetime(2017, 5, 4, 1, 1, 1)
+ result = {'matched_values': {'root'}}
+ self.assertEqual(DeepSearch(obj, item, verbose_level=1), result)
+
+ def test_keep_searching_after_obj_match(self):
+ class AlwaysEqual:
+ def __init__(self, recurse=True):
+ if recurse:
+ self.some_attr = AlwaysEqual(recurse=False)
+ def __eq__(self, other):
+ return True
+
+ obj = AlwaysEqual()
+ item = AlwaysEqual()
+ result = {'matched_values': {'root', 'root.some_attr'}}
+ self.assertEqual(DeepSearch(obj, item, verbose_level=1), result)
+
class GrepTestCase(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/seperman/deepdiff.git@d4918e17c555df1b46827f865c5c105097199f80#egg=deepdiff
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jsonpickle==2.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: deepdiff
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- jsonpickle==2.2.0
prefix: /opt/conda/envs/deepdiff
| [
"tests/test_search.py::DeepSearchTestCase::test_keep_searching_after_obj_match",
"tests/test_search.py::DeepSearchTestCase::test_none"
]
| []
| [
"tests/test_search.py::DeepSearchTestCase::test_bad_attribute",
"tests/test_search.py::DeepSearchTestCase::test_case_insensitive_of_str_in_list",
"tests/test_search.py::DeepSearchTestCase::test_case_insensitive_of_str_in_one_liner",
"tests/test_search.py::DeepSearchTestCase::test_case_sensitive_of_str_in_list",
"tests/test_search.py::DeepSearchTestCase::test_case_sensitive_of_str_in_one_liner",
"tests/test_search.py::DeepSearchTestCase::test_complex_obj",
"tests/test_search.py::DeepSearchTestCase::test_custom_object",
"tests/test_search.py::DeepSearchTestCase::test_custom_object_in_dictionary_verbose",
"tests/test_search.py::DeepSearchTestCase::test_custom_object_skip_path",
"tests/test_search.py::DeepSearchTestCase::test_custom_object_verbose",
"tests/test_search.py::DeepSearchTestCase::test_int_in_dictionary",
"tests/test_search.py::DeepSearchTestCase::test_loop",
"tests/test_search.py::DeepSearchTestCase::test_loop_in_lists",
"tests/test_search.py::DeepSearchTestCase::test_named_tuples_verbose",
"tests/test_search.py::DeepSearchTestCase::test_number_in_list",
"tests/test_search.py::DeepSearchTestCase::test_skip_dictionary_path",
"tests/test_search.py::DeepSearchTestCase::test_skip_list_path",
"tests/test_search.py::DeepSearchTestCase::test_skip_path1",
"tests/test_search.py::DeepSearchTestCase::test_skip_type_str",
"tests/test_search.py::DeepSearchTestCase::test_string_in_dictionary",
"tests/test_search.py::DeepSearchTestCase::test_string_in_dictionary_case_insensitive",
"tests/test_search.py::DeepSearchTestCase::test_string_in_dictionary_in_list_verbose",
"tests/test_search.py::DeepSearchTestCase::test_string_in_dictionary_key_case_insensitive_partial",
"tests/test_search.py::DeepSearchTestCase::test_string_in_dictionary_verbose",
"tests/test_search.py::DeepSearchTestCase::test_string_in_list",
"tests/test_search.py::DeepSearchTestCase::test_string_in_list_verbose",
"tests/test_search.py::DeepSearchTestCase::test_string_in_list_verbose2",
"tests/test_search.py::DeepSearchTestCase::test_string_in_list_verbose3",
"tests/test_search.py::DeepSearchTestCase::test_string_in_root",
"tests/test_search.py::DeepSearchTestCase::test_string_in_root_verbose",
"tests/test_search.py::DeepSearchTestCase::test_string_in_set_verbose",
"tests/test_search.py::DeepSearchTestCase::test_string_in_tuple",
"tests/test_search.py::DeepSearchTestCase::test_unknown_parameters",
"tests/test_search.py::GrepTestCase::test_grep_dict"
]
| []
| MIT License | 1,390 | [
"deepdiff/search.py",
"README.md",
"deepdiff/__init__.py"
]
| [
"deepdiff/search.py",
"README.md",
"deepdiff/__init__.py"
]
|
|
hylang__hy-1309 | 672c8a1637c7ad5c5cc0ce8c77e044c99631a47e | 2017-06-20 23:16:56 | 5c720c0110908e3f47dba2e4cc1c820d16f359a1 | gilch: @Kodiologist Conflicting files, can you fix that? News should be easy. #1307 also affected that test file. You wrote #1269 in the first place, and this seems like a simple amendment. Otherwise, it looks okay to me. The test looks more complex but Travis passed it.
After fixing the conflict, try breaking the flag PYTHONDONTWRITEBYTECODE detection to make sure the test still catches it. | diff --git a/NEWS b/NEWS
index 1a6c3d5e..b99e2285 100644
--- a/NEWS
+++ b/NEWS
@@ -8,6 +8,7 @@ Changes from 0.13.0
[ Bug Fixes ]
* Numeric literals are no longer parsed as symbols when followed by a dot
and a symbol
+ * Hy now respects the environment variable PYTHONDONTWRITEBYTECODE
Changes from 0.12.1
diff --git a/hy/importer.py b/hy/importer.py
index 878e1be5..ec3b606e 100644
--- a/hy/importer.py
+++ b/hy/importer.py
@@ -96,13 +96,14 @@ def import_file_to_module(module_name, fpath, loader=None):
module = imp.new_module(module_name)
module.__file__ = fpath
code = ast_compile(_ast, fpath, "exec")
- try:
- write_code_as_pyc(fpath, code)
- except (IOError, OSError):
- # We failed to save the bytecode, probably because of a
- # permissions issue. The user only asked to import the
- # file, so don't bug them about it.
- pass
+ if not os.environ.get('PYTHONDONTWRITEBYTECODE'):
+ try:
+ write_code_as_pyc(fpath, code)
+ except (IOError, OSError):
+ # We failed to save the bytecode, probably because of a
+ # permissions issue. The user only asked to import the
+ # file, so don't bug them about it.
+ pass
eval(code, module.__dict__)
except (HyTypeError, LexException) as e:
if e.source is None:
| Hy Ignores PYTHONDONTWRITEBYTECODE
When `PYTHONDONTWRITEBYTECODE` is set in the environment (for example via `export PYTHONDONTWRITEBYTECODE=1`), Hy should not generate `.pyc` files. | hylang/hy | diff --git a/tests/test_bin.py b/tests/test_bin.py
index 8f72962e..c2e31e96 100644
--- a/tests/test_bin.py
+++ b/tests/test_bin.py
@@ -9,6 +9,7 @@ import subprocess
import re
from hy._compat import PY3
from hy.importer import get_bytecode_path
+import pytest
hy_dir = os.environ.get('HY_DIR', '')
@@ -18,13 +19,18 @@ def hr(s=""):
return "hy --repl-output-fn=hy.contrib.hy-repr.hy-repr " + s
-def run_cmd(cmd, stdin_data=None, expect=0):
+def run_cmd(cmd, stdin_data=None, expect=0, dontwritebytecode=False):
+ env = None
+ if dontwritebytecode:
+ env = dict(os.environ)
+ env["PYTHONDONTWRITEBYTECODE"] = "1"
p = subprocess.Popen(os.path.join(hy_dir, cmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
- shell=True)
+ shell=True,
+ env=env)
if stdin_data is not None:
p.stdin.write(stdin_data)
p.stdin.flush()
@@ -240,39 +246,42 @@ def test_bin_hy_no_main():
assert "This Should Still Work" in output
-def test_bin_hy_byte_compile():
[email protected]('scenario', [
+ "normal", "prevent_by_force", "prevent_by_env"])
[email protected]('cmd_fmt', [
+ 'hy {fpath}', 'hy -m {modname}', "hy -c '(import {modname})'"])
+def test_bin_hy_byte_compile(scenario, cmd_fmt):
modname = "tests.resources.bin.bytecompile"
fpath = modname.replace(".", "/") + ".hy"
-
- for can_byte_compile in [True, False]:
- for cmd in ["hy " + fpath,
- "hy -m " + modname,
- "hy -c '(import {})'".format(modname)]:
-
- rm(get_bytecode_path(fpath))
-
- if not can_byte_compile:
- # Keep Hy from being able to byte-compile the module by
- # creating a directory at the target location.
- os.mkdir(get_bytecode_path(fpath))
-
- # Whether or not we can byte-compile the module, we should be able
- # to run it.
- output, _ = run_cmd(cmd)
- assert "Hello from macro" in output
- assert "The macro returned: boink" in output
-
- if can_byte_compile:
- # That should've byte-compiled the module.
- assert os.path.exists(get_bytecode_path(fpath))
-
- # When we run the same command again, and we've byte-compiled the
- # module, the byte-compiled version should be run instead of the
- # source, in which case the macro shouldn't be run.
- output, _ = run_cmd(cmd)
- assert ("Hello from macro" in output) ^ can_byte_compile
- assert "The macro returned: boink" in output
+ cmd = cmd_fmt.format(**locals())
+
+ rm(get_bytecode_path(fpath))
+
+ if scenario == "prevent_by_force":
+ # Keep Hy from being able to byte-compile the module by
+ # creating a directory at the target location.
+ os.mkdir(get_bytecode_path(fpath))
+
+ # Whether or not we can byte-compile the module, we should be able
+ # to run it.
+ output, _ = run_cmd(cmd, dontwritebytecode=scenario == "prevent_by_env")
+ assert "Hello from macro" in output
+ assert "The macro returned: boink" in output
+
+ if scenario == "normal":
+ # That should've byte-compiled the module.
+ assert os.path.exists(get_bytecode_path(fpath))
+ elif scenario == "prevent_by_env":
+ # No byte-compiled version should've been created.
+ assert not os.path.exists(get_bytecode_path(fpath))
+
+ # When we run the same command again, and we've byte-compiled the
+ # module, the byte-compiled version should be run instead of the
+ # source, in which case the macro shouldn't be run.
+ output, _ = run_cmd(cmd)
+ assert ("Hello from macro" in output) ^ (scenario == "normal")
+ assert "The macro returned: boink" in output
def test_bin_hy_module_main():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
args==0.1.0
astor==0.8.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
clint==0.5.1
-e git+https://github.com/hylang/hy.git@672c8a1637c7ad5c5cc0ce8c77e044c99631a47e#egg=hy
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
rply==0.7.8
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: hy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- args==0.1.0
- astor==0.8.1
- clint==0.5.1
- rply==0.7.8
prefix: /opt/conda/envs/hy
| [
"tests/test_bin.py::test_bin_hy_byte_compile[hy"
]
| [
"tests/test_bin.py::test_bin_hy_stdin",
"tests/test_bin.py::test_bin_hy_icmd_and_spy"
]
| [
"tests/test_bin.py::test_bin_hy",
"tests/test_bin.py::test_bin_hy_stdin_multiline",
"tests/test_bin.py::test_bin_hy_stdin_comments",
"tests/test_bin.py::test_bin_hy_stdin_assignment",
"tests/test_bin.py::test_bin_hy_stdin_as_arrow",
"tests/test_bin.py::test_bin_hy_stdin_error_underline_alignment",
"tests/test_bin.py::test_bin_hy_stdin_except_do",
"tests/test_bin.py::test_bin_hy_stdin_hy_repr",
"tests/test_bin.py::test_bin_hy_cmd",
"tests/test_bin.py::test_bin_hy_icmd",
"tests/test_bin.py::test_bin_hy_icmd_file",
"tests/test_bin.py::test_bin_hy_missing_file",
"tests/test_bin.py::test_bin_hy_file_with_args",
"tests/test_bin.py::test_bin_hyc",
"tests/test_bin.py::test_bin_hyc_missing_file",
"tests/test_bin.py::test_hy2py",
"tests/test_bin.py::test_bin_hy_builtins",
"tests/test_bin.py::test_bin_hy_main",
"tests/test_bin.py::test_bin_hy_main_args",
"tests/test_bin.py::test_bin_hy_main_exitvalue",
"tests/test_bin.py::test_bin_hy_no_main",
"tests/test_bin.py::test_bin_hy_module_main",
"tests/test_bin.py::test_bin_hy_module_main_args",
"tests/test_bin.py::test_bin_hy_module_main_exitvalue",
"tests/test_bin.py::test_bin_hy_module_no_main"
]
| []
| MIT License | 1,391 | [
"NEWS",
"hy/importer.py"
]
| [
"NEWS",
"hy/importer.py"
]
|
alexhsamuel__ntab-11 | e28de6ee1a9bf991225bfbeac40f5852dbecab36 | 2017-06-22 15:08:41 | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | diff --git a/ntab/fmt.py b/ntab/fmt.py
new file mode 100644
index 0000000..7c047c4
--- /dev/null
+++ b/ntab/fmt.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import six
+
+from .lib.text import palide
+
+#-------------------------------------------------------------------------------
+
+def format_row(row, width=80, max_name_width=32):
+ """
+ @rtype
+ Generator of lines.
+ """
+ vals = row.__dict__
+ name_width = min(max_name_width, max( len(n) for n in vals ))
+ for name, val in six.iteritems(vals):
+ yield "{}: {}".format(
+ palide(name, name_width),
+ palide(str(val), width - name_width - 2)
+ )
+
+
diff --git a/ntab/html.py b/ntab/html.py
index 9ca4ccb..37c25a4 100644
--- a/ntab/html.py
+++ b/ntab/html.py
@@ -82,7 +82,7 @@ def _render(table, css_class="tab-table", max_rows=None):
yield "<thead>"
yield "<tr>"
for name, width in zip(names, widths):
- yield "<th>{}</th>".format(elide(name, max(width, 8)))
+ yield "<th>{}</th>".format(elide(name, max(width, 8), elide_pos=0.7))
yield "</tr>"
yield "</thead>"
yield "<tbody>"
diff --git a/ntab/lib/text.py b/ntab/lib/text.py
index 8ad3e4b..0812d61 100644
--- a/ntab/lib/text.py
+++ b/ntab/lib/text.py
@@ -27,14 +27,18 @@ def pad(string, length, pad=" ", pos=1.0):
if left > 0:
string = pad * (left // pad_len) + pad[: left % pad_len] + string
if right > 0:
- string = string + pad[-(right % pad_len) :] + pad * (right // pad_len)
+ string = (
+ string
+ + pad[pad_len - (right % pad_len) :]
+ + pad * (right // pad_len)
+ )
return string
_pad = pad
-def elide(string, length, ellipsis=u"\u2026", pos=0.7):
+def elide(string, length, ellipsis=u"\u2026", pos=1.0):
"""
Elides characters if necessary to fit `string` in `length` characters.
@@ -63,7 +67,7 @@ def elide(string, length, ellipsis=u"\u2026", pos=0.7):
def palide(string, length, ellipsis=u"\u2026", pad=" ", pad_pos=1.0,
- elide_pos=0.7):
+ elide_pos=1.0):
"""
A combination of `elide` and `pad`.
"""
diff --git a/ntab/tab.py b/ntab/tab.py
index 5b569b5..010dcb1 100644
--- a/ntab/tab.py
+++ b/ntab/tab.py
@@ -13,7 +13,7 @@ import numpy as np
import six
import sys
-from . import nplib
+from . import fmt, nplib
from .lib import *
#-------------------------------------------------------------------------------
@@ -181,11 +181,22 @@ class Row(object):
)
+ def __str__(self):
+ return "\n".join(fmt.format_row(self))
+
+
@property
- def __index__(self):
+ def __idx__(self):
return self.__idx
+ # FIXME: Potentially sketchy.
+ @property
+ def __dict__(self):
+ return odict(
+ (n, a[self.__idx]) for n, a in six.iteritems(self.__arrs) )
+
+
class RowsProxy(collections.Sequence):
# FIXME: Allow modifying values in rows (i.e. mutable rows)?
| pretty-print a row
Print a row with one field on each line.
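The patch above implements this as `fmt.format_row`, which pads each field name to a shared width and yields one `name: value` line per field, with `Row.__str__` joining those lines. A rough standalone sketch of the same idea, using plain `str.ljust` instead of ntab's `palide` helper (the example row is made up):

```python
from collections import OrderedDict

def format_row(values, max_name_width=32):
    """Yield one 'name: value' line per field, names padded to a common width."""
    name_width = min(max_name_width, max(len(n) for n in values))
    for name, val in values.items():
        yield "{}: {}".format(name.ljust(name_width), val)

row = OrderedDict([("name", "sample_1"), ("x", 3), ("flag", True)])
print("\n".join(format_row(row)))
# name: sample_1
# x   : 3
# flag: True
```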
| alexhsamuel/ntab | diff --git a/ntab/lib/test/test_text.py b/ntab/lib/test/test_text.py
new file mode 100644
index 0000000..aff5323
--- /dev/null
+++ b/ntab/lib/test/test_text.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import pytest
+
+from ntab.lib.text import *
+
+#-------------------------------------------------------------------------------
+
+def test_pad_length():
+ assert pad("hello", 0) == "hello"
+ assert pad("hello", 4) == "hello"
+ assert pad("hello", 5) == "hello"
+ assert pad("hello", 6) == "hello "
+ assert pad("hello", 10) == "hello "
+ assert pad("hello", length=10) == "hello "
+
+ assert pad("", 0) == ""
+ assert pad("", 5) == " "
+
+
+def test_pad_pad():
+ assert pad("hello", 4, "x") == "hello"
+ assert pad("hello", 6, "x") == "hellox"
+ assert pad("hello", 9, "x") == "helloxxxx"
+ assert pad("hello", 8, "o") == "helloooo"
+ assert pad("hello", 8, "-") == "hello---"
+ assert pad("hello", pad="-", length=8) == "hello---"
+ assert pad("hello", 8, "-=") == "hello=-="
+ assert pad("hello", 12, ".!.") == "hello..!..!."
+
+
+def test_pad_left():
+ assert pad("hello", 4, pos=0 ) == "hello"
+ assert pad("hello", 10, pos=1 ) == "hello "
+ assert pad("hello", 10, pos=0 ) == " hello"
+ assert pad("hello", 10, pos=0, pad="/") == "/////hello"
+
+
+# FIXME: Test center().
+
+def test_elide_default():
+ assert elide("I am a duck.", 8) == u"I am a \u2026"
+ assert elide("I am a duck.", 14) == "I am a duck."
+
+
+def test_elide_length():
+ assert elide("Hello, world!", 15, "...") == "Hello, world!"
+ assert elide("Hello, world!", 13, "...") == "Hello, world!"
+ assert elide("Hello, world!", 12, "...") == "Hello, wo..."
+ assert elide("Hello, world!", 11, "...") == "Hello, w..."
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 5, "...") == "He..."
+
+ assert elide("foo", 3, "...") == "foo"
+ assert elide("fool", 3, "...") == "..."
+
+
+def test_elide_ellipsis():
+ assert elide("Hello, world!", 10, "...") == "Hello, ..."
+ assert elide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert elide("Hello, world!", 10, "*" ) == "Hello, wo*"
+ assert elide("Hello, world!", 10, "" ) == "Hello, wor"
+
+ assert elide("Hello, world!", ellipsis="*", length=10) == "Hello, wo*"
+
+
+def test_elide_position():
+ assert elide("Hello, world!", 10, "...", 1.0) == "Hello, ..."
+ assert elide("Hello, world!", 10, "...", 0.7) == "Hello...d!"
+ assert elide("Hello, world!", 10, "...", 0.5) == "Hell...ld!"
+ assert elide("Hello, world!", 10, "...", 0.4) == "Hel...rld!"
+ assert elide("Hello, world!", 10, "...", 0.0) == "... world!"
+
+ assert elide(
+ "Hello, world!", pos=0.4, length=10, ellipsis="..") == "Hel..orld!"
+
+
+def test_palide_length():
+ assert palide("Hello, world!", 3, "...") == "..."
+ assert palide("Hello, world!", 10, "...") == "Hello, ..."
+ assert palide("Hello, world!", 11, "...") == "Hello, w..."
+ assert palide("Hello, world!", 13, "...") == "Hello, world!"
+ assert palide("Hello, world!", 14, "...") == "Hello, world! "
+ assert palide("Hello, world!", 20, "...") == "Hello, world! "
+
+
+def test_palide_ellipsis():
+ assert palide("Hello, world!", 10, "~~~~~") == "Hello~~~~~"
+ assert palide("Hello, world!", 10, "..." ) == "Hello, ..."
+ assert palide("Hello, world!", 10, ".." ) == "Hello, w.."
+ assert palide("Hello, world!", 10, "" ) == "Hello, wor"
+
+
+def test_palide_pad():
+ assert palide("Hello, world!", 13, pad="x") == "Hello, world!"
+ assert palide("Hello, world!", 18, pad="x") == "Hello, world!xxxxx"
+ assert palide("Hello, world!", 18, pad="!") == "Hello, world!!!!!!"
+
+
+def test_palide_position():
+ assert palide("Hello, world!", 11, "..", elide_pos=0.0) == "..o, world!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.6) == "Hello..rld!"
+ assert palide("Hello, world!", 11, "..", elide_pos=0.8) == "Hello, ..d!"
+
+
+def test_palide_args():
+ assert palide(
+ ellipsis="-//-",
+ length=20,
+ pad="x",
+ elide_pos=0.4,
+ string="The quick brown fox jumped over the lazy dogs.",
+ ) == "The qu-//-lazy dogs."
+
+
+def test_palide_default():
+ assert palide("I am a duck.", 8) == u"I am a \u2026"
+ assert palide("I am a duck.", 14) == "I am a duck. "
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/alexhsamuel/ntab.git@e28de6ee1a9bf991225bfbeac40f5852dbecab36#egg=ntab
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: ntab
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==1.0.0
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/ntab
| [
"ntab/lib/test/test_text.py::test_pad_length",
"ntab/lib/test/test_text.py::test_pad_pad",
"ntab/lib/test/test_text.py::test_pad_left",
"ntab/lib/test/test_text.py::test_elide_default",
"ntab/lib/test/test_text.py::test_elide_length",
"ntab/lib/test/test_text.py::test_elide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_length",
"ntab/lib/test/test_text.py::test_palide_ellipsis",
"ntab/lib/test/test_text.py::test_palide_pad",
"ntab/lib/test/test_text.py::test_palide_default"
]
| []
| [
"ntab/lib/test/test_text.py::test_elide_position",
"ntab/lib/test/test_text.py::test_palide_position",
"ntab/lib/test/test_text.py::test_palide_args"
]
| []
| MIT License | 1,392 | [
"ntab/html.py",
"ntab/lib/text.py",
"ntab/fmt.py",
"ntab/tab.py"
]
| [
"ntab/html.py",
"ntab/lib/text.py",
"ntab/fmt.py",
"ntab/tab.py"
]
|
|
alexhsamuel__ntab-14 | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | 2017-06-22 16:34:53 | 0cec2c6fdb3e841d13e1e5bc8246bba083f216e0 | diff --git a/ntab/tab.py b/ntab/tab.py
index 3d90a41..d259d13 100644
--- a/ntab/tab.py
+++ b/ntab/tab.py
@@ -9,13 +9,48 @@ from __future__ import absolute_import, division, print_function, unicode_lite
from builtins import *
import collections
from collections import OrderedDict as odict
+import itertools
import numpy as np
+from past.builtins import basestring
import six
import sys
from . import fmt, nplib
from .lib import *
+#-------------------------------------------------------------------------------
+
+def _ensure_array(obj, length):
+ """
+ Ensures `obj` is an ndarray of shape `(length, )`, converting if necessary.
+ """
+ arr = None
+
+ if isinstance(obj, np.ndarray):
+ arr = obj
+
+ if arr is None and not isinstance(obj, basestring):
+ # Convert sequences to arrays.
+ try:
+ len(obj)
+ except:
+ pass
+ else:
+ arr = np.array(obj)
+
+ # Convert scalars to full arrays.
+ if arr is None:
+ # FIXME: Newer numpy doesn't require explicit dtype
+ dtype = np.array(obj).dtype
+ arr = np.full(length, obj, dtype)
+
+ if len(arr.shape) != 1:
+ raise ValueError("not one-dimensional")
+ if length is not None and arr.shape != (length, ):
+ raise ValueError("wrong length")
+ return arr
+
+
#-------------------------------------------------------------------------------
class ArraysObjectProxy(object):
@@ -269,9 +304,9 @@ class Table(object):
(n, a[sel]) for n, a in self.__arrs.items() )
- def __construct(self, arrs):
+ def __construct(self, length, arrs):
+ self.__length = length
self.__arrs = arrs
- self.__length = None if len(arrs) == 0 else len(a_value(arrs))
# Proxies.
# FIXME: Create lazily?
self.a = ArraysObjectProxy(self)
@@ -308,10 +343,26 @@ class Table(object):
be one-dimensional and the same length.
"""
arrs = odict(*args, **kw_args)
+
+ # Get the length.
+ length = None
+ for arr in six.itervalues(arrs):
+ try:
+ length = len(arr)
+ except TypeError:
+ pass
+ else:
+ break
+ if length is None and len(arrs) > 0:
+ raise ValueError("no arrs have length")
+
# Make sure the arrays are all arrays.
- arrs = odict( (str(n), np.array(a)) for n, a in six.iteritems(arrs) )
+ arrs = odict(
+ (str(n), _ensure_array(a, length))
+ for n, a in six.iteritems(arrs)
+ )
- self.__construct(arrs)
+ self.__construct(length, arrs)
self.__check(self.__arrs)
@@ -332,6 +383,7 @@ class Table(object):
# Construct an instance without calling __init__().
self = object.__new__(class_)
+ length = None if len(arrs) == 0 else len(a_value(arrs))
self.__construct(arrs)
if check:
self.__check(self.__arrs)
@@ -377,11 +429,16 @@ class Table(object):
#---------------------------------------------------------------------------
# Mutators
- # FIXME: Make immutable?
def add(self, *args, **kw_args):
+ """
+ Adds or replaces a column.
+ """
arrs = odict(*args, **kw_args)
- arrs = odict( (str(n), np.array(a)) for n, a in six.iteritems(arrs) )
+ arrs = odict(
+ (str(n), _ensure_array(a, self.__length))
+ for n, a in six.iteritems(arrs)
+ )
if len(arrs) == 0:
# Nothing to do.
| assign constant to array
As a convenience, allow
```py
tbl.a.foo = 42
```
for either an existing or new array.
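The patch above supports this through `_ensure_array`, which broadcasts a scalar to a full-length column with `np.full` when the assigned value has no length. A minimal sketch of that conversion, simplified from the patch (the string check here uses `str` rather than `past.builtins.basestring`):

```python
import numpy as np

def ensure_array(obj, length):
    """Return obj as a 1-D array of the given length, broadcasting scalars."""
    if isinstance(obj, np.ndarray):
        arr = obj
    elif hasattr(obj, "__len__") and not isinstance(obj, str):
        arr = np.array(obj)
    else:
        # Scalar: fill a whole column with it, inferring the dtype.
        arr = np.full(length, obj, np.array(obj).dtype)
    if arr.shape != (length,):
        raise ValueError("wrong length")
    return arr

ensure_array(42, 3)         # array([42, 42, 42])
ensure_array("foo", 3)      # array(['foo', 'foo', 'foo'], dtype='<U3')
ensure_array([3, 4, 5], 3)  # array([3, 4, 5])
```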
| alexhsamuel/ntab | diff --git a/ntab/test/test_basic.py b/ntab/test/test_basic.py
index 44e7468..6259508 100644
--- a/ntab/test/test_basic.py
+++ b/ntab/test/test_basic.py
@@ -81,3 +81,18 @@ def test_empty_arrs():
assert tab.num_rows == 0
+def test_tab_create_scalar():
+ tab = Table(i=2, x=[3, 4, 5], l="foo")
+ assert tab.num_rows == 3
+ assert list(tab.a.i) == [2, 2, 2]
+ assert list(tab.a.l) == ["foo", "foo", "foo"]
+
+
+def test_tab_add_col_scalar():
+ tab = Table(x=[3, 4, 5])
+ tab.a.i = 2
+ tab.a.l = "foo"
+ assert list(tab.a.i) == [2, 2, 2]
+ assert list(tab.a.l) == ["foo", "foo", "foo"]
+
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/alexhsamuel/ntab.git@0cec2c6fdb3e841d13e1e5bc8246bba083f216e0#egg=ntab
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
six==1.17.0
tomli==2.2.1
| name: ntab
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==1.0.0
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- six==1.17.0
- tomli==2.2.1
prefix: /opt/conda/envs/ntab
| [
"ntab/test/test_basic.py::test_tab_create_scalar",
"ntab/test/test_basic.py::test_tab_add_col_scalar"
]
| []
| [
"ntab/test/test_basic.py::test_init_dict",
"ntab/test/test_basic.py::test_init_empty_dict",
"ntab/test/test_basic.py::test_init_odict",
"ntab/test/test_basic.py::test_init_odict_empty",
"ntab/test/test_basic.py::test_init_kw_args",
"ntab/test/test_basic.py::test_init_items",
"ntab/test/test_basic.py::test_init_bad_length",
"ntab/test/test_basic.py::test_empty_arrs"
]
| []
| MIT License | 1,393 | [
"ntab/tab.py"
]
| [
"ntab/tab.py"
]
|
|
EdinburghGenomics__pyclarity-lims-11 | 14dae08ac127b86df96b326ce466d4c4854dff2c | 2017-06-22 16:57:01 | 14dae08ac127b86df96b326ce466d4c4854dff2c | diff --git a/pyclarity_lims/lims.py b/pyclarity_lims/lims.py
index 4f64117..67f26ad 100644
--- a/pyclarity_lims/lims.py
+++ b/pyclarity_lims/lims.py
@@ -89,18 +89,21 @@ class Lims(object):
else:
return self.parse_response(r)
- def get_file_contents(self, id=None, uri=None):
+ def get_file_contents(self, id=None, uri=None, encoding=None, crlf=False):
"""Returns the contents of the file of <ID> or <uri>"""
if id:
- segments = ['api', self.VERSION, 'files', id, 'download']
+ url = self.get_uri('files', id, 'download')
elif uri:
- segments = [uri, 'download']
+ url = uri.rstrip('/') + '/download'
else:
- raise ValueError("id or uri required")
- url = urljoin(self.baseuri, '/'.join(segments))
+ raise ValueError('id or uri required')
+
r = self.request_session.get(url, auth=(self.username, self.password), timeout=TIMEOUT)
self.validate_response(r)
- return r.text
+ if encoding:
+ r.encoding = encoding
+
+ return r.text.replace('\r\n', '\n') if crlf else r.text
def upload_new_file(self, entity, file_to_upload):
"""Upload a file and attach it to the provided entity."""
| Support for different file encodings
We should be able to read `utf-16`-encoded files, e.g. from the Spectramax. `requests` can return text with any encoding:
```python
>>> request.encoding
'utf-8'
>>> request.text
'some_glitchy_looking_binary_data'
>>> request.encoding = 'utf-16'
>>> request.text
'some_well_formed_data'
```
We should add this to `Lims.get_file_contents`. We should also be able to replace CRLFs with `\n`.
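A usage sketch of the API this asks for, matching the signature added in the patch above (the server URL, credentials and file id are placeholders):
```python
from pyclarity_lims.lims import Lims

lims = Lims('https://lims.example.com', username='apiuser', password='apipass')

# Read a UTF-16 Spectramax export and normalise Windows line endings.
text = lims.get_file_contents(id='an_id', encoding='utf-16', crlf=True)
for line in text.splitlines():
    print(line)
```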
| EdinburghGenomics/pyclarity-lims | diff --git a/tests/test_lims.py b/tests/test_lims.py
index ffa503b..bcf82db 100644
--- a/tests/test_lims.py
+++ b/tests/test_lims.py
@@ -1,4 +1,3 @@
-import xml
from unittest import TestCase
from requests.exceptions import HTTPError
@@ -19,6 +18,7 @@ else:
from unittest.mock import patch, Mock
import builtins
+
class TestLims(TestCase):
url = 'http://testgenologics.com:4040'
username = 'test'
@@ -36,12 +36,10 @@ class TestLims(TestCase):
<exc:exception xmlns:exc="http://pyclarity_lims.com/ri/exception">
</exc:exception>"""
-
def test_get_uri(self):
lims = Lims(self.url, username=self.username, password=self.password)
assert lims.get_uri('artifacts',sample_name='test_sample') == '{url}/api/v2/artifacts?sample_name=test_sample'.format(url=self.url)
-
def test_parse_response(self):
lims = Lims(self.url, username=self.username, password=self.password)
r = Mock(content = self.sample_xml, status_code=200)
@@ -56,7 +54,6 @@ class TestLims(TestCase):
r = Mock(content = self.error_no_msg_xml, status_code=400)
self.assertRaises(HTTPError, lims.parse_response, r)
-
@patch('requests.Session.get',return_value=Mock(content = sample_xml, status_code=200))
def test_get(self, mocked_instance):
lims = Lims(self.url, username=self.username, password=self.password)
@@ -88,7 +85,6 @@ class TestLims(TestCase):
self.assertRaises(HTTPError, lims.post, uri=uri, data=self.sample_xml)
assert mocked_put.call_count == 1
-
@patch('os.path.isfile', return_value=True)
@patch.object(builtins, 'open')
def test_upload_new_file(self, mocked_open, mocked_isfile):
@@ -124,8 +120,6 @@ class TestLims(TestCase):
lims.route_artifacts(artifact_list=[artifact], workflow_uri=self.url+'/api/v2/configuration/workflows/1')
assert mocked_post.call_count == 1
-
-
def test_tostring(self):
lims = Lims(self.url, username=self.username, password=self.password)
from xml.etree import ElementTree as ET
@@ -139,5 +133,16 @@ class TestLims(TestCase):
string = lims.tostring(etree)
assert string == expected_string
+ def test_get_file_contents(self):
+ lims = Lims(self.url, username=self.username, password=self.password)
+ lims.validate_response = Mock()
+ lims.request_session = Mock(get=Mock(return_value=Mock(encoding=None, text='some data\r\n')))
+ exp_url = self.url + '/api/v2/files/an_id/download'
+ assert lims.get_file_contents(uri=self.url + '/api/v2/files/an_id') == 'some data\r\n'
+ assert lims.request_session.get.return_value.encoding is None
+ lims.request_session.get.assert_called_with(exp_url, auth=(self.username, self.password), timeout=16)
+ assert lims.get_file_contents(id='an_id', encoding='utf-16', crlf=True) == 'some data\n'
+ assert lims.request_session.get.return_value.encoding == 'utf-16'
+ lims.request_session.get.assert_called_with(exp_url, auth=(self.username, self.password), timeout=16)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/EdinburghGenomics/pyclarity-lims.git@14dae08ac127b86df96b326ce466d4c4854dff2c#egg=pyclarity_lims
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: pyclarity-lims
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/pyclarity-lims
| [
"tests/test_lims.py::TestLims::test_get_file_contents"
]
| []
| [
"tests/test_lims.py::TestLims::test_get",
"tests/test_lims.py::TestLims::test_get_uri",
"tests/test_lims.py::TestLims::test_parse_response",
"tests/test_lims.py::TestLims::test_post",
"tests/test_lims.py::TestLims::test_put",
"tests/test_lims.py::TestLims::test_route_artifact",
"tests/test_lims.py::TestLims::test_tostring",
"tests/test_lims.py::TestLims::test_upload_new_file"
]
| []
| MIT License | 1,394 | [
"pyclarity_lims/lims.py"
]
| [
"pyclarity_lims/lims.py"
]
|
|
hylang__hy-1314 | 2e465dbf70624d2ab0923b878b68ed5a177da874 | 2017-06-22 23:42:05 | 5c720c0110908e3f47dba2e4cc1c820d16f359a1 | gilch: We've got conflicting files now.
Auto-promotion sure seems like it would make some things easier.
How would this affect #919? Tag macros are still at compile time, but I think Clojure's tagged literals are at read time, like Common Lisp's `#.`. I'd also like easier access to Python's `eval` in Hy (we already have `exec`). Currently you have to import it from the `builtins` module. We could either rename Hy's eval (to `hy-eval`) or rename Python's eval (to `py-eval`). I'm imagining in-line Python--something like
```
=> (#py"lambda x: x + 1" 41)
42
```
--would be possible with no performance impact. Currently, even with a macro, the (py)eval has to happen at runtime.
gilch: If we're generally autoboxing unquotes, shouldn't this work?
```Python
=> (disassemble `(+ ~(+ 1 1) 40) True)
from hy.core.language import disassemble
from hy import HyExpression, HyInteger, HySymbol
disassemble(HyExpression(((([] + [HySymbol('+')]) + [(1 + 1)]) + [HyInteger(40)])), True)
Traceback (most recent call last):
File "c:\users\me\documents\github\autobox-hy\hy\importer.py", line 184, in hy_eval
return eval(ast_compile(expr, "<eval>", "eval"), namespace)
File "<eval>", line 1, in <module>
File "C:\Users\ME\Documents\GitHub\autobox-hy/hy/core/language.hy", line 78, in disassemble
(fake-source-positions tree)
File "C:\Users\ME\Documents\GitHub\autobox-hy/hy/core/language.hy", line 182, in fake_source_positions
(fake-source-positions subtree)))
File "C:\Users\ME\Documents\GitHub\autobox-hy/hy/core/language.hy", line 185, in fake_source_positions
(setattr tree attr 1))))
AttributeError: 'int' object has no attribute 'start_line'
```
It works if I box it manually though.
```Python
=> (disassemble `(+ ~(HyInteger (+ 1 1)) 40) True)
from hy.core.language import disassemble
from hy import HyExpression, HyInteger, HySymbol
disassemble(HyExpression(((([] + [HySymbol('+')]) + [HyInteger((1 + 1))]) + [HyInteger(40)])), True)
'(2 + 40)'
```
Is the compiler really the right place to put this?
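As context for the exchange above: the hand-boxing shown in that snippet is what `hy.models.wrap_value` does for plain Python values. A quick Python-level check (assuming this branch of Hy is importable):
```python
from hy.models import wrap_value

boxed = wrap_value(1 + 1)
print(type(boxed).__name__)  # HyInteger
print(boxed == 2)            # True -- the Hy models subclass the plain Python types
```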
Kodiologist: I think #919 and inline Python (which I, too, would like to see) are orthogonal to this PR. And yeah, I think it would make sense to have `builtins.eval` exported from `hy.core.language` as `py-eval`, although that too would probably be better in a separate PR.
I'll look into the merge conflicts and `disassemble`. The latter is presumably running into problems because it does its own faking of source positions, which isn't playing nicely with how I do that in the compiler.
> Is the compiler really the right place to put this?
I'm pretty sure that the answer is yes. It's the last chance to do it, so doing it then should mean that we only have to implement it in one place in Hy. | diff --git a/NEWS b/NEWS
index b99e2285..a21c5142 100644
--- a/NEWS
+++ b/NEWS
@@ -4,6 +4,10 @@ Changes from 0.13.0
* Single-character "sharp macros" changed to "tag macros", which can have
longer names
* Periods are no longer allowed in keywords
+ * `eval` is now a function instead of a special form
+ * The compiler now automatically promotes values to Hy model objects
+ as necessary, so you can write ``(eval `(+ 1 ~n))`` instead of
+ ``(eval `(+ 1 ~(HyInteger n)))``
[ Bug Fixes ]
* Numeric literals are no longer parsed as symbols when followed by a dot
diff --git a/docs/language/api.rst b/docs/language/api.rst
index 0058c7dc..5ed646d0 100644
--- a/docs/language/api.rst
+++ b/docs/language/api.rst
@@ -863,27 +863,6 @@ doto
=> collection
[2 1]
-eval
-----
-
-``eval`` evaluates a quoted expression and returns the value. The optional
-second and third arguments specify the dictionary of globals to use and the
-module name. The globals dictionary defaults to ``(local)`` and the module name
-defaults to the name of the current module.
-
-.. code-block:: clj
-
- => (eval '(print "Hello World"))
- "Hello World"
-
-If you want to evaluate a string, use ``read-str`` to convert it to a
-form first:
-
-.. code-block:: clj
-
- => (eval (read-str "(+ 1 1)"))
- 2
-
eval-and-compile
----------------
diff --git a/docs/language/core.rst b/docs/language/core.rst
index b52cef4d..21c111b4 100644
--- a/docs/language/core.rst
+++ b/docs/language/core.rst
@@ -230,6 +230,30 @@ Returns ``True`` if *coll* is empty. Equivalent to ``(= 0 (len coll))``.
False
+.. _eval-fn:
+
+eval
+----
+
+``eval`` evaluates a quoted expression and returns the value. The optional
+second and third arguments specify the dictionary of globals to use and the
+module name. The globals dictionary defaults to ``(local)`` and the module name
+defaults to the name of the current module.
+
+.. code-block:: clj
+
+ => (eval '(print "Hello World"))
+ "Hello World"
+
+If you want to evaluate a string, use ``read-str`` to convert it to a
+form first:
+
+.. code-block:: clj
+
+ => (eval (read-str "(+ 1 1)"))
+ 2
+
+
.. _every?-fn:
every?
diff --git a/hy/cmdline.py b/hy/cmdline.py
index 71e4b2c3..a9d966c0 100644
--- a/hy/cmdline.py
+++ b/hy/cmdline.py
@@ -15,7 +15,7 @@ import astor.codegen
import hy
-from hy.lex import LexException, PrematureEndOfInput, tokenize
+from hy.lex import LexException, PrematureEndOfInput
from hy.lex.parser import hy_symbol_mangle
from hy.compiler import HyTypeError
from hy.importer import (hy_eval, import_buffer_to_module,
@@ -77,12 +77,9 @@ class HyREPL(code.InteractiveConsole):
global SIMPLE_TRACEBACKS
try:
try:
- tokens = tokenize(source)
+ do = import_buffer_to_hst(source)
except PrematureEndOfInput:
return True
- do = HyExpression([HySymbol('do')] + tokens)
- do.start_line = do.end_line = do.start_column = do.end_column = 1
- do.replace(do)
except LexException as e:
if e.source is None:
e.source = source
diff --git a/hy/compiler.py b/hy/compiler.py
index 437495d8..adc26472 100755
--- a/hy/compiler.py
+++ b/hy/compiler.py
@@ -5,14 +5,15 @@
from hy.models import (HyObject, HyExpression, HyKeyword, HyInteger, HyComplex,
HyString, HyBytes, HySymbol, HyFloat, HyList, HySet,
- HyDict, HyCons)
+ HyDict, HyCons, wrap_value)
from hy.errors import HyCompileError, HyTypeError
from hy.lex.parser import hy_symbol_mangle
import hy.macros
from hy._compat import (
- str_type, bytes_type, long_type, PY3, PY34, PY35, raise_empty)
+ str_type, string_types, bytes_type, long_type, PY3, PY34, PY35,
+ raise_empty)
from hy.macros import require, macroexpand, tag_macroexpand
import hy.importer
@@ -110,6 +111,19 @@ def builds_if(_type, condition):
return lambda fn: fn
+def spoof_positions(obj):
+ if not isinstance(obj, HyObject) or isinstance(obj, HyCons):
+ return
+ if not hasattr(obj, "start_column"):
+ obj.start_column = 0
+ if not hasattr(obj, "start_line"):
+ obj.start_line = 0
+ if (hasattr(obj, "__iter__") and
+ not isinstance(obj, (string_types, bytes_type))):
+ for x in obj:
+ spoof_positions(x)
+
+
class Result(object):
"""
Smart representation of the result of a hy->AST compilation
@@ -378,23 +392,23 @@ class HyASTCompiler(object):
ret = Result()
for module, names in self.imports.items():
if None in names:
- ret += self.compile([
- HyExpression([
+ e = HyExpression([
HySymbol("import"),
HySymbol(module),
]).replace(expr)
- ])
+ spoof_positions(e)
+ ret += self.compile(e)
names = sorted(name for name in names if name)
if names:
- ret += self.compile([
- HyExpression([
+ e = HyExpression([
HySymbol("import"),
HyList([
HySymbol(module),
HyList([HySymbol(name) for name in names])
])
]).replace(expr)
- ])
+ spoof_positions(e)
+ ret += self.compile(e)
self.imports = defaultdict(set)
return ret.stmts
@@ -404,6 +418,11 @@ class HyASTCompiler(object):
if not isinstance(ret, Result):
ret = Result() + ret
return ret
+ if not isinstance(atom, HyObject):
+ atom = wrap_value(atom)
+ if isinstance(atom, HyObject):
+ spoof_positions(atom)
+ return self.compile_atom(type(atom), atom)
def compile(self, tree):
try:
@@ -602,12 +621,6 @@ class HyASTCompiler(object):
ast.copy_location(new_name, name)
return new_name
- @builds(list)
- def compile_raw_list(self, entries):
- ret = self._compile_branch(entries)
- ret += ret.expr_as_stmt()
- return ret
-
def _render_quoted_form(self, form, level):
"""
Render a quoted form as a new HyExpression.
@@ -706,31 +719,6 @@ class HyASTCompiler(object):
raise HyTypeError(expr,
"`%s' can't be used at the top-level" % expr[0])
- @builds("eval")
- @checkargs(min=1, max=3)
- def compile_eval(self, expr):
- expr.pop(0)
-
- if not isinstance(expr[0], (HyExpression, HySymbol)):
- raise HyTypeError(expr, "expression expected as first argument")
-
- elist = [HySymbol("hy_eval")] + [expr[0]]
- if len(expr) >= 2:
- elist.append(expr[1])
- else:
- elist.append(HyExpression([HySymbol("locals")]))
-
- if len(expr) == 3:
- elist.append(expr[2])
- else:
- elist.append(HyString(self.module_name))
-
- ret = self.compile(HyExpression(elist).replace(expr))
-
- ret.add_imports("hy.importer", ["hy_eval"])
-
- return ret
-
@builds("do")
def compile_do(self, expression):
expression.pop(0)
@@ -766,6 +754,7 @@ class HyASTCompiler(object):
return ret
@builds("try")
+ @checkargs(min=2)
def compile_try_expression(self, expr):
expr.pop(0) # try
@@ -1858,6 +1847,7 @@ class HyASTCompiler(object):
op = ops[expression.pop(0)]
right_associative = op == ast.Pow
+ lineno, col_offset = expression.start_line, expression.start_column
if right_associative:
expression = expression[::-1]
ret = self.compile(expression.pop(0))
@@ -1870,8 +1860,8 @@ class HyASTCompiler(object):
ret += ast.BinOp(left=left_expr,
op=op(),
right=right_expr,
- lineno=child.start_line,
- col_offset=child.start_column)
+ lineno=lineno,
+ col_offset=col_offset)
return ret
@builds("**")
@@ -2619,21 +2609,21 @@ def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
body = []
expr = None
- if not (isinstance(tree, HyObject) or type(tree) is list):
- raise HyCompileError("tree must be a HyObject or a list")
+ if not isinstance(tree, HyObject):
+ tree = wrap_value(tree)
+ if not isinstance(tree, HyObject):
+ raise HyCompileError("`tree` must be a HyObject or capable of "
+ "being promoted to one")
+ spoof_positions(tree)
- if isinstance(tree, HyObject) or tree:
- compiler = HyASTCompiler(module_name)
- result = compiler.compile(tree)
- expr = result.force_expr
+ compiler = HyASTCompiler(module_name)
+ result = compiler.compile(tree)
+ expr = result.force_expr
- if not get_expr:
- result += result.expr_as_stmt()
+ if not get_expr:
+ result += result.expr_as_stmt()
- # We need to test that the type is *exactly* `list` because we don't
- # want to do `tree[0]` on HyList or such.
- spoof_tree = tree[0] if type(tree) is list else tree
- body = compiler.imports_as_stmts(spoof_tree) + result.stmts
+ body = compiler.imports_as_stmts(tree) + result.stmts
ret = root(body=body)
diff --git a/hy/core/language.hy b/hy/core/language.hy
index d498c454..6bcdb793 100644
--- a/hy/core/language.hy
+++ b/hy/core/language.hy
@@ -18,7 +18,8 @@
(import [hy._compat [long-type]]) ; long for python2, int for python3
(import [hy.models [HyCons HySymbol HyKeyword]])
(import [hy.lex [LexException PrematureEndOfInput tokenize]])
-(import [hy.compiler [HyASTCompiler]])
+(import [hy.compiler [HyASTCompiler spoof-positions]])
+(import [hy.importer [hy-eval :as eval]])
(defn butlast [coll]
"Returns coll except of last element."
@@ -74,8 +75,8 @@
(import astor)
(import hy.compiler)
- (fake-source-positions tree)
- (setv compiled (hy.compiler.hy_compile tree (calling-module-name)))
+ (spoof-positions tree)
+ (setv compiled (hy.compiler.hy-compile tree (calling-module-name)))
((if codegen
astor.codegen.to_source
astor.dump)
@@ -174,15 +175,6 @@
"Return true if (pred x) is logical true for every x in coll, else false"
(all (map pred coll)))
-(defn fake-source-positions [tree]
- "Fake the source positions for a given tree"
- (if (coll? tree)
- (for* [subtree tree]
- (fake-source-positions subtree)))
- (for* [attr '[start-line end-line start-column end-column]]
- (if (not (hasattr tree attr))
- (setattr tree attr 1))))
-
(defn flatten [coll]
"Return a single flat list expanding all members of coll"
(if (coll? coll)
@@ -469,7 +461,7 @@
(def *exports*
'[*map accumulate butlast calling-module-name chain coll? combinations
comp complement compress cons cons? constantly count cycle dec distinct
- disassemble drop drop-last drop-while empty? even? every? first filter
+ disassemble drop drop-last drop-while empty? eval even? every? first filter
flatten float? fraction gensym group-by identity inc input instance?
integer integer? integer-char? interleave interpose islice iterable?
iterate iterator? juxt keyword keyword? last list* macroexpand
diff --git a/hy/importer.py b/hy/importer.py
index ec3b606e..63b87d4a 100644
--- a/hy/importer.py
+++ b/hy/importer.py
@@ -3,7 +3,7 @@
# license. See the LICENSE.
from hy.compiler import hy_compile, HyTypeError
-from hy.models import HyObject, replace_hy_obj
+from hy.models import HyObject, HyExpression, HySymbol, replace_hy_obj
from hy.lex import tokenize, LexException
from hy.errors import HyIOError
@@ -14,6 +14,7 @@ import struct
import imp
import sys
import ast
+import inspect
import os
import __future__
@@ -31,7 +32,7 @@ def ast_compile(ast, filename, mode):
def import_buffer_to_hst(buf):
"""Import content from buf and return a Hy AST."""
- return tokenize(buf + "\n")
+ return HyExpression([HySymbol("do")] + tokenize(buf + "\n"))
def import_file_to_hst(fpath):
@@ -142,7 +143,27 @@ def import_buffer_to_module(module_name, buf):
return mod
-def hy_eval(hytree, namespace, module_name, ast_callback=None):
+def hy_eval(hytree, namespace=None, module_name=None, ast_callback=None):
+ """``eval`` evaluates a quoted expression and returns the value. The optional
+ second and third arguments specify the dictionary of globals to use and the
+ module name. The globals dictionary defaults to ``(local)`` and the module
+ name defaults to the name of the current module.
+
+ => (eval '(print "Hello World"))
+ "Hello World"
+
+ If you want to evaluate a string, use ``read-str`` to convert it to a
+ form first:
+
+ => (eval (read-str "(+ 1 1)"))
+ 2"""
+ if namespace is None:
+ frame = inspect.stack()[1][0]
+ namespace = inspect.getargvalues(frame).locals
+ if module_name is None:
+ m = inspect.getmodule(inspect.stack()[1][0])
+ module_name = '__eval__' if m is None else m.__name__
+
foo = HyObject()
foo.start_line = 0
foo.end_line = 0
diff --git a/hy/macros.py b/hy/macros.py
index b24735c2..874c63af 100644
--- a/hy/macros.py
+++ b/hy/macros.py
@@ -3,7 +3,7 @@
# license. See the LICENSE.
from inspect import getargspec, formatargspec
-from hy.models import replace_hy_obj, wrap_value, HyExpression, HyString
+from hy.models import replace_hy_obj, HyExpression, HyString
from hy.errors import HyTypeError, HyMacroExpansionError
@@ -196,7 +196,7 @@ def macroexpand_1(tree, compiler):
raise HyMacroExpansionError(tree, msg)
try:
- obj = wrap_value(m(*ntree[1:], **opts))
+ obj = m(*ntree[1:], **opts)
except HyTypeError as e:
if e.expression is None:
e.expression = tree
@@ -225,4 +225,4 @@ def tag_macroexpand(tag, tree, compiler):
)
expr = tag_macro(tree)
- return replace_hy_obj(wrap_value(expr), tree)
+ return replace_hy_obj(expr, tree)
diff --git a/hy/models.py b/hy/models.py
index 412f5d1f..93cce18a 100644
--- a/hy/models.py
+++ b/hy/models.py
@@ -4,6 +4,7 @@
from __future__ import unicode_literals
from hy._compat import PY3, str_type, bytes_type, long_type, string_types
+from fractions import Fraction
class HyObject(object):
@@ -229,6 +230,8 @@ class HyExpression(HyList):
return "(%s)" % (" ".join([repr(x) for x in self]))
_wrappers[HyExpression] = lambda e: HyExpression(wrap_value(x) for x in e)
+_wrappers[Fraction] = lambda e: HyExpression(
+ [HySymbol("fraction"), wrap_value(e.numerator), wrap_value(e.denominator)])
class HySet(HyList):
| eval doesn't use hy.models._wrappers
I came across this issue trying to narrow down a compiler error I was getting from a macro.
Why should the following piece of code throw an error?
```
=> (import numpy)
=> (= "." (get (name numpy) 0))
False
=> (eval '(= "." (get (name numpy) 0)))
False
=> (eval `(= "." ~(get (name numpy) 0)))
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/Users/dg/anaconda3/lib/python3.5/site-packages/hy/importer.py", line 110, in hy_eval
hytree.replace(foo)
File "/Users/dg/anaconda3/lib/python3.5/site-packages/hy/models/list.py", line 31, in replace
x.replace(other)
TypeError: replace() takes at least 2 arguments (1 given)
```
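With the promotion introduced in the patch above, that manual boxing becomes unnecessary. A Python-level sketch of the same idea (assuming `hy_eval`, which the patch exposes as Hy's `eval`, accepts the same mixed trees as the Hy-level call):
```python
from hy.importer import hy_eval
from hy.models import HyExpression, HySymbol

# Roughly what `(eval `(+ ~(+ 1 1) 40))` builds: a quasiquoted tree with a
# plain Python int spliced in.  The compiler now promotes the raw 2 to a
# HyInteger instead of failing inside replace().
tree = HyExpression([HySymbol("+"), 1 + 1, 40])
print(hy_eval(tree, {}, "__main__"))  # 42
```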
| hylang/hy | diff --git a/tests/compilers/test_ast.py b/tests/compilers/test_ast.py
index 2e880465..1bacdf08 100644
--- a/tests/compilers/test_ast.py
+++ b/tests/compilers/test_ast.py
@@ -7,9 +7,9 @@ from __future__ import unicode_literals
from hy import HyString
from hy.models import HyObject
from hy.compiler import hy_compile
+from hy.importer import import_buffer_to_hst
from hy.errors import HyCompileError, HyTypeError
from hy.lex.exceptions import LexException
-from hy.lex import tokenize
from hy._compat import PY3
import ast
@@ -25,12 +25,12 @@ def _ast_spotcheck(arg, root, secondary):
def can_compile(expr):
- return hy_compile(tokenize(expr), "__main__")
+ return hy_compile(import_buffer_to_hst(expr), "__main__")
def cant_compile(expr):
try:
- hy_compile(tokenize(expr), "__main__")
+ hy_compile(import_buffer_to_hst(expr), "__main__")
assert False
except HyTypeError as e:
# Anything that can't be compiled should raise a user friendly
@@ -48,8 +48,10 @@ def cant_compile(expr):
def test_ast_bad_type():
"Make sure AST breakage can happen"
+ class C:
+ pass
try:
- hy_compile("foo", "__main__")
+ hy_compile(C(), "__main__")
assert True is False
except HyCompileError:
pass
@@ -252,7 +254,7 @@ def test_ast_require():
def test_ast_no_pointless_imports():
def contains_import_from(code):
return any([isinstance(node, ast.ImportFrom)
- for node in hy_compile(tokenize(code), "__main__").body])
+ for node in can_compile(code).body])
# `reduce` is a builtin in Python 2, but not Python 3.
# The version of `map` that returns an iterator is a builtin in
# Python 3, but not Python 2.
@@ -460,7 +462,7 @@ def test_ast_unicode_strings():
hy_s.start_line = hy_s.end_line = 0
hy_s.start_column = hy_s.end_column = 0
- code = hy_compile([hy_s], "__main__")
+ code = hy_compile(hy_s, "__main__")
# code == ast.Module(body=[ast.Expr(value=ast.Str(s=xxx))])
return code.body[0].value.s
@@ -471,7 +473,7 @@ def test_ast_unicode_strings():
def test_ast_unicode_vs_bytes():
- def f(x): return hy_compile(tokenize(x), "__main__").body[0].value.s
+ def f(x): return can_compile(x).body[0].value.s
assert f('"hello"') == u"hello"
assert type(f('"hello"')) is (str if PY3 else unicode) # noqa
assert f('b"hello"') == (eval('b"hello"') if PY3 else "hello")
diff --git a/tests/native_tests/language.hy b/tests/native_tests/language.hy
index 5b2421f5..b4f222d5 100644
--- a/tests/native_tests/language.hy
+++ b/tests/native_tests/language.hy
@@ -5,6 +5,7 @@
(import [tests.resources [kwtest function-with-a-dash]]
[os.path [exists isdir isfile]]
[sys :as systest]
+ re
[operator [or_]]
[hy.errors [HyTypeError]]
pytest)
@@ -1087,10 +1088,42 @@
(defn test-eval-failure []
"NATIVE: test eval failure modes"
; yo dawg
- (try (eval '(eval)) (except [e HyTypeError]) (else (assert False)))
- (try (eval '(eval "snafu")) (except [e HyTypeError]) (else (assert False)))
+ (try (eval '(eval)) (except [e TypeError]) (else (assert False)))
+ (defclass C)
+ (try (eval (C)) (except [e TypeError]) (else (assert False)))
(try (eval 'False []) (except [e HyTypeError]) (else (assert False)))
- (try (eval 'False {} 1) (except [e HyTypeError]) (else (assert False))))
+ (try (eval 'False {} 1) (except [e TypeError]) (else (assert False))))
+
+
+(defn test-eval-quasiquote []
+ ; https://github.com/hylang/hy/issues/1174
+
+ (for [x [
+ None False True
+ 5 5.1
+ 1/2
+ 5j 5.1j 2+1j 1.2+3.4j
+ "" b""
+ "apple bloom" b"apple bloom" "⚘" b"\x00"
+ :mykeyword
+ [] #{} {}
+ [1 2 3] #{1 2 3} {"a" 1 "b" 2}]]
+ (assert (= (eval `(identity ~x)) x))
+ (assert (= (eval x) x)))
+
+ ; Tuples wrap to HyLists, not HyExpressions.
+ (assert (= (eval (,)) []))
+ (assert (= (eval (, 1 2 3)) [1 2 3]))
+
+ (assert (= (eval `(+ "a" ~(+ "b" "c"))) "abc"))
+
+ (setv l ["a" "b"])
+ (setv n 1)
+ (assert (= (eval `(get ~l ~n) "b")))
+
+ (setv d {"a" 1 "b" 2})
+ (setv k "b")
+ (assert (= (eval `(get ~d ~k)) 2)))
(defn test-import-syntax []
@@ -1366,7 +1399,9 @@
(assert (= (disassemble '(do (leaky) (leaky) (macros)))
"Module(\n body=[\n Expr(value=Call(func=Name(id='leaky'), args=[], keywords=[], starargs=None, kwargs=None)),\n Expr(value=Call(func=Name(id='leaky'), args=[], keywords=[], starargs=None, kwargs=None)),\n Expr(value=Call(func=Name(id='macros'), args=[], keywords=[], starargs=None, kwargs=None))])")))
(assert (= (disassemble '(do (leaky) (leaky) (macros)) True)
- "leaky()\nleaky()\nmacros()")))
+ "leaky()\nleaky()\nmacros()"))
+ (assert (= (re.sub r"[L() ]" "" (disassemble `(+ ~(+ 1 1) 40) True))
+ "2+40")))
(defn test-attribute-access []
diff --git a/tests/native_tests/native_macros.hy b/tests/native_tests/native_macros.hy
index 6089f323..6baa7a4f 100644
--- a/tests/native_tests/native_macros.hy
+++ b/tests/native_tests/native_macros.hy
@@ -84,6 +84,14 @@
"NATIVE: test macro calling a plain function"
(assert (= 3 (bar 1 2))))
+(defn test-optional-and-apply-in-macro []
+ ; https://github.com/hylang/hy/issues/1154
+ (defn f [&rest args]
+ (+ "f:" (repr args)))
+ (defmacro mac [&optional x]
+ `(apply f [~x]))
+ (assert (= (mac) "f:(None,)")))
+
(defn test-midtree-yield []
"NATIVE: test yielding with a returnable"
(defn kruft [] (yield) (+ 1 1)))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 9
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"tox",
"Pygments>=1.6",
"Sphinx",
"sphinx_rtd_theme"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
appdirs==1.4.4
args==0.1.0
astor==0.8.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
clint==0.5.1
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/hylang/hy.git@2e465dbf70624d2ab0923b878b68ed5a177da874#egg=hy
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytz==2025.2
requests==2.27.1
rply==0.7.8
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: hy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- appdirs==1.4.4
- args==0.1.0
- astor==0.8.1
- babel==2.11.0
- charset-normalizer==2.0.12
- clint==0.5.1
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- jinja2==3.0.3
- markupsafe==2.0.1
- platformdirs==2.4.0
- pygments==2.14.0
- pytz==2025.2
- requests==2.27.1
- rply==0.7.8
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tox==3.28.0
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/hy
| [
"tests/compilers/test_ast.py::test_ast_bad_type",
"tests/compilers/test_ast.py::test_ast_bad_if",
"tests/compilers/test_ast.py::test_ast_valid_if",
"tests/compilers/test_ast.py::test_ast_valid_unary_op",
"tests/compilers/test_ast.py::test_ast_invalid_unary_op",
"tests/compilers/test_ast.py::test_ast_bad_while",
"tests/compilers/test_ast.py::test_ast_good_do",
"tests/compilers/test_ast.py::test_ast_good_raise",
"tests/compilers/test_ast.py::test_ast_raise_from",
"tests/compilers/test_ast.py::test_ast_bad_raise",
"tests/compilers/test_ast.py::test_ast_good_try",
"tests/compilers/test_ast.py::test_ast_bad_try",
"tests/compilers/test_ast.py::test_ast_good_except",
"tests/compilers/test_ast.py::test_ast_bad_except",
"tests/compilers/test_ast.py::test_ast_good_assert",
"tests/compilers/test_ast.py::test_ast_bad_assert",
"tests/compilers/test_ast.py::test_ast_good_global",
"tests/compilers/test_ast.py::test_ast_bad_global",
"tests/compilers/test_ast.py::test_ast_good_nonlocal",
"tests/compilers/test_ast.py::test_ast_bad_nonlocal",
"tests/compilers/test_ast.py::test_ast_good_defclass",
"tests/compilers/test_ast.py::test_ast_bad_defclass",
"tests/compilers/test_ast.py::test_ast_good_lambda",
"tests/compilers/test_ast.py::test_ast_bad_lambda",
"tests/compilers/test_ast.py::test_ast_good_yield",
"tests/compilers/test_ast.py::test_ast_bad_yield",
"tests/compilers/test_ast.py::test_ast_good_import_from",
"tests/compilers/test_ast.py::test_ast_require",
"tests/compilers/test_ast.py::test_ast_no_pointless_imports",
"tests/compilers/test_ast.py::test_ast_good_get",
"tests/compilers/test_ast.py::test_ast_bad_get",
"tests/compilers/test_ast.py::test_ast_good_cut",
"tests/compilers/test_ast.py::test_ast_bad_cut",
"tests/compilers/test_ast.py::test_ast_good_take",
"tests/compilers/test_ast.py::test_ast_good_drop",
"tests/compilers/test_ast.py::test_ast_good_assoc",
"tests/compilers/test_ast.py::test_ast_bad_assoc",
"tests/compilers/test_ast.py::test_ast_bad_with",
"tests/compilers/test_ast.py::test_ast_valid_while",
"tests/compilers/test_ast.py::test_ast_valid_for",
"tests/compilers/test_ast.py::test_ast_invalid_for",
"tests/compilers/test_ast.py::test_ast_expression_basics",
"tests/compilers/test_ast.py::test_ast_anon_fns_basics",
"tests/compilers/test_ast.py::test_ast_non_decoratable",
"tests/compilers/test_ast.py::test_ast_lambda_lists",
"tests/compilers/test_ast.py::test_ast_print",
"tests/compilers/test_ast.py::test_ast_tuple",
"tests/compilers/test_ast.py::test_argument_destructuring",
"tests/compilers/test_ast.py::test_lambda_list_keywords_rest",
"tests/compilers/test_ast.py::test_lambda_list_keywords_key",
"tests/compilers/test_ast.py::test_lambda_list_keywords_kwargs",
"tests/compilers/test_ast.py::test_lambda_list_keywords_kwonly",
"tests/compilers/test_ast.py::test_lambda_list_keywords_mixed",
"tests/compilers/test_ast.py::test_missing_keyword_argument_value",
"tests/compilers/test_ast.py::test_ast_unicode_strings",
"tests/compilers/test_ast.py::test_ast_unicode_vs_bytes",
"tests/compilers/test_ast.py::test_compile_error",
"tests/compilers/test_ast.py::test_for_compile_error",
"tests/compilers/test_ast.py::test_attribute_access",
"tests/compilers/test_ast.py::test_attribute_empty",
"tests/compilers/test_ast.py::test_cons_correct",
"tests/compilers/test_ast.py::test_invalid_list_comprehension",
"tests/compilers/test_ast.py::test_bad_setv",
"tests/compilers/test_ast.py::test_defn",
"tests/compilers/test_ast.py::test_setv_builtins",
"tests/native_tests/language.hy::test_sys_argv",
"tests/native_tests/language.hy::test_hex",
"tests/native_tests/language.hy::test_octal",
"tests/native_tests/language.hy::test_binary",
"tests/native_tests/language.hy::test_fractions",
"tests/native_tests/language.hy::test_lists",
"tests/native_tests/language.hy::test_dicts",
"tests/native_tests/language.hy::test_sets",
"tests/native_tests/language.hy::test_setv_get",
"tests/native_tests/language.hy::test_setv_builtin",
"tests/native_tests/language.hy::test_setv_pairs",
"tests/native_tests/language.hy::test_setv_returns_none",
"tests/native_tests/language.hy::test_store_errors",
"tests/native_tests/language.hy::test_fn_corner_cases",
"tests/native_tests/language.hy::test_alias_names_in_errors",
"tests/native_tests/language.hy::test_for_loop",
"tests/native_tests/language.hy::test_nasty_for_nesting",
"tests/native_tests/language.hy::test_while_loop",
"tests/native_tests/language.hy::test_branching",
"tests/native_tests/language.hy::test_branching_with_do",
"tests/native_tests/language.hy::test_branching_expr_count_with_do",
"tests/native_tests/language.hy::test_cond",
"tests/native_tests/language.hy::test_if",
"tests/native_tests/language.hy::test_index",
"tests/native_tests/language.hy::test_fn",
"tests/native_tests/language.hy::test_imported_bits",
"tests/native_tests/language.hy::test_kwargs",
"tests/native_tests/language.hy::test_apply",
"tests/native_tests/language.hy::test_apply_with_methods",
"tests/native_tests/language.hy::test_dotted",
"tests/native_tests/language.hy::test_do",
"tests/native_tests/language.hy::test_exceptions",
"tests/native_tests/language.hy::test_earmuffs",
"tests/native_tests/language.hy::test_threading",
"tests/native_tests/language.hy::test_tail_threading",
"tests/native_tests/language.hy::test_threading_two",
"tests/native_tests/language.hy::test_as_threading",
"tests/native_tests/language.hy::test_assoc",
"tests/native_tests/language.hy::test_multiassoc",
"tests/native_tests/language.hy::test_pass",
"tests/native_tests/language.hy::test_yield",
"tests/native_tests/language.hy::test_yield_with_return",
"tests/native_tests/language.hy::test_yield_in_try",
"tests/native_tests/language.hy::test_first",
"tests/native_tests/language.hy::test_cut",
"tests/native_tests/language.hy::test_rest",
"tests/native_tests/language.hy::test_importas",
"tests/native_tests/language.hy::test_context",
"tests/native_tests/language.hy::test_context_yield",
"tests/native_tests/language.hy::test_with_return",
"tests/native_tests/language.hy::test_for_doodle",
"tests/native_tests/language.hy::test_for_else",
"tests/native_tests/language.hy::test_list_comprehensions",
"tests/native_tests/language.hy::test_set_comprehensions",
"tests/native_tests/language.hy::test_dict_comprehensions",
"tests/native_tests/language.hy::test_generator_expressions",
"tests/native_tests/language.hy::test_defn_order",
"tests/native_tests/language.hy::test_defn_return",
"tests/native_tests/language.hy::test_defn_lambdakey",
"tests/native_tests/language.hy::test_defn_do",
"tests/native_tests/language.hy::test_defn_do_return",
"tests/native_tests/language.hy::test_defn_dunder_name",
"tests/native_tests/language.hy::test_mangles",
"tests/native_tests/language.hy::test_fn_return",
"tests/native_tests/language.hy::test_if_mangler",
"tests/native_tests/language.hy::test_nested_mangles",
"tests/native_tests/language.hy::test_symbol_utf_8",
"tests/native_tests/language.hy::test_symbol_dash",
"tests/native_tests/language.hy::test_symbol_question_mark",
"tests/native_tests/language.hy::test_and",
"tests/native_tests/language.hy::test_and_#1151_do",
"tests/native_tests/language.hy::test_and_#1151_for",
"tests/native_tests/language.hy::test_and_#1151_del",
"tests/native_tests/language.hy::test_or",
"tests/native_tests/language.hy::test_or_#1151_do",
"tests/native_tests/language.hy::test_or_#1151_for",
"tests/native_tests/language.hy::test_or_#1151_del",
"tests/native_tests/language.hy::test_xor",
"tests/native_tests/language.hy::test_if_return_branching",
"tests/native_tests/language.hy::test_keyword",
"tests/native_tests/language.hy::test_keyword_clash",
"tests/native_tests/language.hy::test_empty_keyword",
"tests/native_tests/language.hy::test_nested_if",
"tests/native_tests/language.hy::test_eval",
"tests/native_tests/language.hy::test_eval_false",
"tests/native_tests/language.hy::test_eval_globals",
"tests/native_tests/language.hy::test_eval_failure",
"tests/native_tests/language.hy::test_eval_quasiquote",
"tests/native_tests/language.hy::test_import_syntax",
"tests/native_tests/language.hy::test_lambda_keyword_lists",
"tests/native_tests/language.hy::test_key_arguments",
"tests/native_tests/language.hy::test_optional_arguments",
"tests/native_tests/language.hy::test_undefined_name",
"tests/native_tests/language.hy::test_if_in_if",
"tests/native_tests/language.hy::test_try_except_return",
"tests/native_tests/language.hy::test_try_else_return",
"tests/native_tests/language.hy::test_require",
"tests/native_tests/language.hy::test_require_native",
"tests/native_tests/language.hy::test_encoding_nightmares",
"tests/native_tests/language.hy::test_keyword_dict_access",
"tests/native_tests/language.hy::test_break_breaking",
"tests/native_tests/language.hy::test_continue_continuation",
"tests/native_tests/language.hy::test_empty_list",
"tests/native_tests/language.hy::test_string",
"tests/native_tests/language.hy::test_del",
"tests/native_tests/language.hy::test_macroexpand",
"tests/native_tests/language.hy::test_macroexpand_1",
"tests/native_tests/language.hy::test_merge_with",
"tests/native_tests/language.hy::test_calling_module_name",
"tests/native_tests/language.hy::test_attribute_access",
"tests/native_tests/language.hy::test_keyword_quoting",
"tests/native_tests/language.hy::test_only_parse_lambda_list_in_defn",
"tests/native_tests/language.hy::test_read",
"tests/native_tests/language.hy::test_read_str",
"tests/native_tests/language.hy::test_keyword_creation",
"tests/native_tests/language.hy::test_name_conversion",
"tests/native_tests/language.hy::test_keywords",
"tests/native_tests/language.hy::test_keywords_and_macros",
"tests/native_tests/language.hy::test_argument_destr",
"tests/native_tests/native_macros.hy::test_rev_macro",
"tests/native_tests/native_macros.hy::test_macro_kw",
"tests/native_tests/native_macros.hy::test_fn_calling_macro",
"tests/native_tests/native_macros.hy::test_optional_and_apply_in_macro",
"tests/native_tests/native_macros.hy::test_midtree_yield",
"tests/native_tests/native_macros.hy::test_midtree_yield_in_for",
"tests/native_tests/native_macros.hy::test_midtree_yield_in_while",
"tests/native_tests/native_macros.hy::test_multi_yield",
"tests/native_tests/native_macros.hy::test_if_python2",
"tests/native_tests/native_macros.hy::test_gensym_in_macros",
"tests/native_tests/native_macros.hy::test_with_gensym",
"tests/native_tests/native_macros.hy::test_defmacro_g_bang",
"tests/native_tests/native_macros.hy::test_defmacro_bang",
"tests/native_tests/native_macros.hy::test_if_not",
"tests/native_tests/native_macros.hy::test_lif",
"tests/native_tests/native_macros.hy::test_lif_not",
"tests/native_tests/native_macros.hy::test_yield_from",
"tests/native_tests/native_macros.hy::test_yield_from_exception_handling",
"tests/native_tests/native_macros.hy::test_defmain"
]
| [
"tests/native_tests/language.hy::test_disassemble"
]
| []
| []
| MIT License | 1,395 | [
"hy/cmdline.py",
"hy/models.py",
"hy/compiler.py",
"NEWS",
"hy/core/language.hy",
"docs/language/core.rst",
"hy/importer.py",
"docs/language/api.rst",
"hy/macros.py"
]
| [
"hy/cmdline.py",
"hy/models.py",
"hy/compiler.py",
"NEWS",
"hy/core/language.hy",
"docs/language/core.rst",
"hy/importer.py",
"docs/language/api.rst",
"hy/macros.py"
]
|
nipy__nipype-2093 | 4b587d6bc297d202148fa2a7f9ab4de50ff49bb7 | 2017-06-23 21:43:41 | 14161a590a3166b5a9c0f4afd42ff1acf843a960 | diff --git a/doc/users/saving_workflows.rst b/doc/users/saving_workflows.rst
index c97751eea..33d1e8a11 100644
--- a/doc/users/saving_workflows.rst
+++ b/doc/users/saving_workflows.rst
@@ -55,7 +55,7 @@ This will create a file "outputtestsave.py" with the following content:
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.utility import Function
- from nipype.utils.misc import getsource
+ from nipype.utils.functions import getsource
from nipype.interfaces.fsl.preprocess import BET
from nipype.interfaces.fsl.utils import ImageMaths
# Functions
diff --git a/nipype/interfaces/utility/wrappers.py b/nipype/interfaces/utility/wrappers.py
index 4de11d7ea..6885d7218 100644
--- a/nipype/interfaces/utility/wrappers.py
+++ b/nipype/interfaces/utility/wrappers.py
@@ -24,7 +24,7 @@ from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, runtime_pr
BaseInterfaceInputSpec, get_max_resources_used)
from ..io import IOBase, add_traits
from ...utils.filemanip import filename_to_list
-from ...utils.misc import getsource, create_function_from_source
+from ...utils.functions import getsource, create_function_from_source
logger = logging.getLogger('interface')
if runtime_profile:
diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py
index 25b12ab60..fe8228c8a 100644
--- a/nipype/pipeline/engine/utils.py
+++ b/nipype/pipeline/engine/utils.py
@@ -31,7 +31,8 @@ import networkx as nx
from ...utils.filemanip import (fname_presuffix, FileNotFoundError, to_str,
filename_to_list, get_related_files)
-from ...utils.misc import create_function_from_source, str2bool
+from ...utils.misc import str2bool
+from ...utils.functions import create_function_from_source
from ...interfaces.base import (CommandLine, isdefined, Undefined,
InterfaceResult)
from ...interfaces.utility import IdentityInterface
@@ -100,7 +101,7 @@ def _write_inputs(node):
lines[-1] = lines[-1].replace(' %s(' % funcname,
' %s_1(' % funcname)
funcname = '%s_1' % funcname
- lines.append('from nipype.utils.misc import getsource')
+ lines.append('from nipype.utils.functions import getsource')
lines.append("%s.inputs.%s = getsource(%s)" % (nodename,
key,
funcname))
diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py
index f30ed5005..14c4920a7 100644
--- a/nipype/pipeline/engine/workflows.py
+++ b/nipype/pipeline/engine/workflows.py
@@ -36,8 +36,8 @@ import networkx as nx
from ... import config, logging
-from ...utils.misc import (unflatten, package_check, str2bool,
- getsource, create_function_from_source)
+from ...utils.misc import (unflatten, package_check, str2bool)
+from ...utils.functions import (getsource, create_function_from_source)
from ...interfaces.base import (traits, InputMultiPath, CommandLine,
Undefined, TraitedSpec, DynamicTraitedSpec,
Bunch, InterfaceResult, md5, Interface,
diff --git a/nipype/utils/functions.py b/nipype/utils/functions.py
new file mode 100644
index 000000000..aa72d8500
--- /dev/null
+++ b/nipype/utils/functions.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+"""
+Handles custom functions used in Function interface. Future imports
+are avoided to keep namespace as clear as possible.
+"""
+from builtins import next, str
+from future.utils import raise_from
+import inspect
+from textwrap import dedent
+
+def getsource(function):
+ """Returns the source code of a function"""
+ return dedent(inspect.getsource(function))
+
+
+def create_function_from_source(function_source, imports=None):
+ """Return a function object from a function source
+
+ Parameters
+ ----------
+ function_source : unicode string
+ unicode string defining a function
+ imports : list of strings
+ list of import statements in string form that allow the function
+ to be executed in an otherwise empty namespace
+ """
+ ns = {}
+ import_keys = []
+
+ try:
+ if imports is not None:
+ for statement in imports:
+ exec(statement, ns)
+ import_keys = list(ns.keys())
+ exec(function_source, ns)
+
+ except Exception as e:
+ msg = 'Error executing function\n{}\n'.format(function_source)
+ msg += ("Functions in connection strings have to be standalone. "
+ "They cannot be declared either interactively or inside "
+ "another function or inline in the connect string. Any "
+ "imports should be done inside the function.")
+ raise_from(RuntimeError(msg), e)
+ ns_funcs = list(set(ns) - set(import_keys + ['__builtins__']))
+ assert len(ns_funcs) == 1, "Function or inputs are ill-defined"
+ func = ns[ns_funcs[0]]
+ return func
diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py
index 552e24c43..095e6b88f 100644
--- a/nipype/utils/misc.py
+++ b/nipype/utils/misc.py
@@ -3,7 +3,7 @@
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous utility functions
"""
-from __future__ import print_function, division, unicode_literals, absolute_import
+from __future__ import print_function, unicode_literals, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import next, str
@@ -66,47 +66,6 @@ def trim(docstring, marker=None):
return '\n'.join(trimmed)
-def getsource(function):
- """Returns the source code of a function"""
- src = dedent(inspect.getsource(function))
- return src
-
-
-def create_function_from_source(function_source, imports=None):
- """Return a function object from a function source
-
- Parameters
- ----------
- function_source : pickled string
- string in pickled form defining a function
- imports : list of strings
- list of import statements in string form that allow the function
- to be executed in an otherwise empty namespace
- """
- ns = {}
- import_keys = []
- try:
- if imports is not None:
- for statement in imports:
- exec(statement, ns)
- import_keys = list(ns.keys())
- exec(function_source, ns)
-
- except Exception as e:
- msg = '\nError executing function:\n %s\n' % function_source
- msg += '\n'.join(["Functions in connection strings have to be standalone.",
- "They cannot be declared either interactively or inside",
- "another function or inline in the connect string. Any",
- "imports should be done inside the function"
- ])
- raise_from(RuntimeError(msg), e)
- ns_funcs = list(set(ns) - set(import_keys + ['__builtins__']))
- assert len(ns_funcs) == 1, "Function or inputs are ill-defined"
- funcname = ns_funcs[0]
- func = ns[funcname]
- return func
-
-
def find_indices(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
| Function interface crash when upgrading nipype
### Summary
The Function interface crashes in code that was working in an older nipype installation (version details below). From the error, it seems I am doing something wrong when importing other functions inside the Function interface, but I don't know exactly what it is or how to fix it.
The function itself is intended to convert HTML into PDF and send it as an email attachment.
### Actual behavior
The function below does not run, and this is the crash information:
```
170601-20:23:45,389 workflow INFO:
Traceback (most recent call last):
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/plugins/multiproc.py", line 52, in run_node
result['result'] = node.run(updatehash=updatehash)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 372, in run
self._run_interface()
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 482, in _run_interface
self._result = self._run_command(execute)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/engine/nodes.py", line 613, in _run_command
result = self._interface.run()
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/interfaces/base.py", line 1081, in run
runtime = self._run_wrapper(runtime)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/interfaces/base.py", line 1029, in _run_wrapper
runtime = self._run_interface(runtime)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/interfaces/utility/wrappers.py", line 146, in _run_interface
self.imports)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/utils/misc.py", line 102, in create_function_from_source
raise_from(RuntimeError(msg), e)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/future/utils/__init__.py", line 454, in raise_from
raise e
RuntimeError:
Error executing function:
Functions in connection strings have to be standalone.
They cannot be declared either interactively or inside
another function or inline in the connect string. Any
imports should be done inside the function
Interface Function failed to run.
170601-20:23:46,403 workflow INFO:
Traceback (most recent call last):
***********************************
File "/home/myanezl2/workspace/c3nl/QA_matlab/QA_Matlab.py", line 192, in <module>
170601-20:23:46,403 workflow ERROR:
could not run node: QA.QA_email
170601-20:23:46,404 workflow INFO:
crashfile: /home/myanezl2/workspace/c3nl/QA_matlab/crash-20170601-202344-myanezl2-QA_email-98e03d45-765c-4bec-87c5-c4e253113c83.pklz
170601-20:23:46,404 workflow INFO:
***********************************
QA.run('MultiProc', plugin_args={'n_procs': 2})
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/engine/workflows.py", line 590, in run
runner.run(execgraph, updatehash=updatehash, config=self.config)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/plugins/base.py", line 279, in run
report_nodes_not_run(notrun)
File "/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype/pipeline/plugins/base.py", line 101, in report_nodes_not_run
raise RuntimeError(('Workflow did not execute cleanly. '
RuntimeError: Workflow did not execute cleanly. Check log for details
Process finished with exit code 1
```
### Expected behavior
This function was working in this previous installation:
print(nipype.__version__)
0.11.0
print(nipype.get_info())
{'nibabel_version': '2.0.2', 'networkx_version': '1.10', 'numpy_version': '1.11.0', 'sys_platform': 'linux2', 'sys_version': '2.7.11 |Anaconda 2.4.1 (64-bit)| (default, Dec 6 2015, 18:08:32) \n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': '(none found)', 'commit_hash': '<not found>', 'pkg_path': '/share/apps/anaconda-3-2.4.1/lib/python2.7/site-packages/nipype', 'sys_executable': '/share/apps/anaconda-3-2.4.1/bin/python', 'traits_version': '4.5.0', 'scipy_version': '0.17.0'}
### Script/Workflow details
```
def gen_qa_email(in_directory, empty_variable, in_date):
import os
import smtplib
import pdfkit
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
""" Convert html to pdf"""
pdf_file = pdfkit.from_file(in_directory + '/QA_results.html', False)
""" Create the email message container from the input args."""
recipients = ['[email protected]']
msg = MIMEMultipart('alternative')
msg['Subject'] = 'QA' + in_date
msg['From'] = '[email protected]'
msg['To'] = ", ".join(recipients)
""" Attach parts into message container"""
pdf = MIMEApplication(pdf_file, 'pdf')
pdf.add_header('Content-Disposition', 'attachment', filename= 'QA.pdf')
msg.attach(pdf)
try:
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo
        smtpserver.login('[email protected]', 'password')
        smtpserver.sendmail('[email protected]', recipients, msg.as_string())
smtpserver.quit()
except smtplib.SMTPAuthenticationError as e:
print "Unable to send message: %s" % e
return None
QA_email = Node(name='QA_email', interface=Function(input_names=['in_directory', 'empty_variable', 'in_date'], output_names=[], function=gen_qa_email))
QA_email.inputs.in_date = experiment_date
```
### Platform details:
print(nipype.__version__)
0.13.1
print(nipype.get_info())
{'nibabel_version': '2.1.0', 'sys_executable': '/apps/software/anaconda/4.3.1/bin/python', 'networkx_version': '1.11', 'numpy_version': '1.11.3', 'sys_platform': 'linux2', 'sys_version': '2.7.13 |Anaconda 4.3.1 (64-bit)| (default, Dec 20 2016, 23:09:15) \n[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)]', 'commit_source': u'installation', 'commit_hash': u'8946bcab9', 'pkg_path': '/apps/software/anaconda/4.3.1/lib/python2.7/site-packages/nipype', 'nipype_version': u'0.13.1', 'traits_version': '4.6.0', 'scipy_version': '0.18.1'}
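A minimal round-trip of the relocated helpers, mirroring the new test module added below (assumes a nipype checkout with this change installed):
```python
from nipype.utils.functions import getsource, create_function_from_source

def cube(x):
    return x ** 3

# The Function interface serialises the callable to source and rebuilds it
# in a clean namespace at run time; this is the same round trip by hand.
src = getsource(cube)
rebuilt = create_function_from_source(src)
assert rebuilt(3) == 27
```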
| nipy/nipype | diff --git a/nipype/utils/tests/test_functions.py b/nipype/utils/tests/test_functions.py
new file mode 100644
index 000000000..1d9b9dac7
--- /dev/null
+++ b/nipype/utils/tests/test_functions.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+import sys
+import pytest
+from nipype.utils.functions import (getsource, create_function_from_source)
+
+def _func1(x):
+ return x**3
+
+def test_func_to_str():
+
+ def func1(x):
+ return x**2
+
+ # Should be ok with both functions!
+ for f in _func1, func1:
+ f_src = getsource(f)
+ f_recreated = create_function_from_source(f_src)
+ assert f(2.3) == f_recreated(2.3)
+
+def test_func_to_str_err():
+ bad_src = "obbledygobbledygook"
+ with pytest.raises(RuntimeError): create_function_from_source(bad_src)
+
+def _print_statement():
+ try:
+ exec('print ""')
+ return True
+ except SyntaxError:
+ return False
+
+def test_func_string():
+ def is_string():
+ return isinstance('string', str)
+
+ wrapped_func = create_function_from_source(getsource(is_string))
+ assert is_string() == wrapped_func()
+
[email protected](sys.version_info[0] > 2, reason="breaks python 3")
+def test_func_print_py2():
+ wrapped_func = create_function_from_source(getsource(_print_statement))
+ assert wrapped_func()
diff --git a/nipype/utils/tests/test_misc.py b/nipype/utils/tests/test_misc.py
index f2780a584..1685fd645 100644
--- a/nipype/utils/tests/test_misc.py
+++ b/nipype/utils/tests/test_misc.py
@@ -8,9 +8,8 @@ from builtins import next
import pytest
-from nipype.utils.misc import (container_to_string, getsource,
- create_function_from_source, str2bool, flatten,
- unflatten)
+from nipype.utils.misc import (container_to_string, str2bool,
+ flatten, unflatten)
def test_cont_to_str():
@@ -35,26 +34,6 @@ def test_cont_to_str():
assert (container_to_string(123) == '123')
-def _func1(x):
- return x**3
-
-
-def test_func_to_str():
-
- def func1(x):
- return x**2
-
- # Should be ok with both functions!
- for f in _func1, func1:
- f_src = getsource(f)
- f_recreated = create_function_from_source(f_src)
- assert f(2.3) == f_recreated(2.3)
-
-def test_func_to_str_err():
- bad_src = "obbledygobbledygook"
- with pytest.raises(RuntimeError): create_function_from_source(bad_src)
-
-
@pytest.mark.parametrize("string, expected", [
("yes", True), ("true", True), ("t", True), ("1", True),
("no", False), ("false", False), ("n", False), ("f", False), ("0", False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 5
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@4b587d6bc297d202148fa2a7f9ab4de50ff49bb7#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/utils/tests/test_functions.py::test_func_to_str",
"nipype/utils/tests/test_functions.py::test_func_to_str_err",
"nipype/utils/tests/test_functions.py::test_func_string",
"nipype/utils/tests/test_misc.py::test_cont_to_str",
"nipype/utils/tests/test_misc.py::test_str2bool[yes-True]",
"nipype/utils/tests/test_misc.py::test_str2bool[true-True]",
"nipype/utils/tests/test_misc.py::test_str2bool[t-True]",
"nipype/utils/tests/test_misc.py::test_str2bool[1-True]",
"nipype/utils/tests/test_misc.py::test_str2bool[no-False]",
"nipype/utils/tests/test_misc.py::test_str2bool[false-False]",
"nipype/utils/tests/test_misc.py::test_str2bool[n-False]",
"nipype/utils/tests/test_misc.py::test_str2bool[f-False]",
"nipype/utils/tests/test_misc.py::test_str2bool[0-False]",
"nipype/utils/tests/test_misc.py::test_flatten"
]
| []
| []
| []
| Apache License 2.0 | 1,396 | [
"nipype/utils/misc.py",
"nipype/utils/functions.py",
"nipype/interfaces/utility/wrappers.py",
"doc/users/saving_workflows.rst",
"nipype/pipeline/engine/workflows.py",
"nipype/pipeline/engine/utils.py"
]
| [
"nipype/utils/misc.py",
"nipype/utils/functions.py",
"nipype/interfaces/utility/wrappers.py",
"doc/users/saving_workflows.rst",
"nipype/pipeline/engine/workflows.py",
"nipype/pipeline/engine/utils.py"
]
|
|
pre-commit__pre-commit-hooks-205 | 78818b90cd694c29333ba54d38f9e60b6359ccfc | 2017-06-24 00:21:06 | 4a457a725e1c0349372672ab440eff745c04f915 | diff --git a/pre_commit_hooks/requirements_txt_fixer.py b/pre_commit_hooks/requirements_txt_fixer.py
index efa1906..41e1ffc 100644
--- a/pre_commit_hooks/requirements_txt_fixer.py
+++ b/pre_commit_hooks/requirements_txt_fixer.py
@@ -30,21 +30,25 @@ class Requirement(object):
def fix_requirements(f):
requirements = []
- before = []
+ before = list(f)
after = []
- for line in f:
- before.append(line)
+ before_string = b''.join(before)
+
+ # If the file is empty (i.e. only whitespace/newlines) exit early
+ if before_string.strip() == b'':
+ return 0
- # If the most recent requirement object has a value, then it's time to
- # start building the next requirement object.
+ for line in before:
+ # If the most recent requirement object has a value, then it's
+ # time to start building the next requirement object.
if not len(requirements) or requirements[-1].value is not None:
requirements.append(Requirement())
requirement = requirements[-1]
- # If we see a newline before any requirements, then this is a top of
- # file comment.
+ # If we see a newline before any requirements, then this is a
+ # top of file comment.
if len(requirements) == 1 and line.strip() == b'':
if len(requirement.comments) and requirement.comments[0].startswith(b'#'):
requirement.value = b'\n'
@@ -60,7 +64,6 @@ def fix_requirements(f):
after.append(comment)
after.append(requirement.value)
- before_string = b''.join(before)
after_string = b''.join(after)
if before_string == after_string:
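A minimal check of the early-return behaviour added above (a sketch only; the in-memory byte streams stand in for a real requirements.txt):
```python
import io

from pre_commit_hooks.requirements_txt_fixer import fix_requirements

# Empty and whitespace-only files are now left untouched and report no change.
assert fix_requirements(io.BytesIO(b'')) == 0
assert fix_requirements(io.BytesIO(b'\n')) == 0
```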
| requirements-txt-fixer broken for empty requirements.txt
A bit of an edge case, shouldn't crash though:
```python
Sorting requirements.txt
Traceback (most recent call last):
File "/nail/tmp/tmp25dodv0q/venv/bin/requirements-txt-fixer", line 11, in <module>
sys.exit(fix_requirements_txt())
File "/nail/tmp/tmp25dodv0q/venv/lib/python3.5/site-packages/pre_commit_hooks/requirements_txt_fixer.py", line 84, in fix_requirements_txt
ret_for_file = fix_requirements(file_obj)
File "/nail/tmp/tmp25dodv0q/venv/lib/python3.5/site-packages/pre_commit_hooks/requirements_txt_fixer.py", line 64, in fix_requirements
after_string = b''.join(after)
TypeError: sequence item 1: expected a bytes-like object, NoneType found
``` | pre-commit/pre-commit-hooks | diff --git a/tests/requirements_txt_fixer_test.py b/tests/requirements_txt_fixer_test.py
index 1c590a5..33f6a47 100644
--- a/tests/requirements_txt_fixer_test.py
+++ b/tests/requirements_txt_fixer_test.py
@@ -5,6 +5,8 @@ from pre_commit_hooks.requirements_txt_fixer import Requirement
# Input, expected return value, expected output
TESTS = (
+ (b'', 0, b''),
+ (b'\n', 0, b'\n'),
(b'foo\nbar\n', 1, b'bar\nfoo\n'),
(b'bar\nfoo\n', 0, b'bar\nfoo\n'),
(b'#comment1\nfoo\n#comment2\nbar\n', 1, b'#comment2\nbar\n#comment1\nfoo\n'),
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
autopep8==2.0.4
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==2.5.5
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.4.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
pep8==1.7.1
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
-e git+https://github.com/pre-commit/pre-commit-hooks.git@78818b90cd694c29333ba54d38f9e60b6359ccfc#egg=pre_commit_hooks
py==1.11.0
pycodestyle==2.10.0
pyflakes==1.0.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
simplejson==3.20.1
six==1.17.0
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: pre-commit-hooks
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- autopep8==2.0.4
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==2.5.5
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.4.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- pep8==1.7.1
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.10.0
- pyflakes==1.0.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- simplejson==3.20.1
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit-hooks
| [
"tests/requirements_txt_fixer_test.py::test_integration[\\n-0-\\n]"
]
| []
| [
"tests/requirements_txt_fixer_test.py::test_integration[-0-]",
"tests/requirements_txt_fixer_test.py::test_integration[foo\\nbar\\n-1-bar\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[bar\\nfoo\\n-0-bar\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[#comment1\\nfoo\\n#comment2\\nbar\\n-1-#comment2\\nbar\\n#comment1\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[#comment1\\nbar\\n#comment2\\nfoo\\n-0-#comment1\\nbar\\n#comment2\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[#comment\\n\\nfoo\\nbar\\n-1-#comment\\n\\nbar\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[#comment\\n\\nbar\\nfoo\\n-0-#comment\\n\\nbar\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[\\nfoo\\nbar\\n-1-bar\\n\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[\\nbar\\nfoo\\n-0-\\nbar\\nfoo\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[pyramid==1\\npyramid-foo==2\\n-0-pyramid==1\\npyramid-foo==2\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[ocflib\\nDjango\\nPyMySQL\\n-1-Django\\nocflib\\nPyMySQL\\n]",
"tests/requirements_txt_fixer_test.py::test_integration[-e",
"tests/requirements_txt_fixer_test.py::test_requirement_object"
]
| []
| MIT License | 1,397 | [
"pre_commit_hooks/requirements_txt_fixer.py"
]
| [
"pre_commit_hooks/requirements_txt_fixer.py"
]
|
|
aio-libs__aiosmtpd-115 | a046b08ac5a697718268c30394bcd45586074be0 | 2017-06-24 19:18:35 | b87538bc1fc0137b5d188db938c9b386c71683a3 | diff --git a/MANIFEST.in b/MANIFEST.in
index 1b19070..6c44541 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
include *.py MANIFEST.in
global-include *.txt *.rst *.ini *.yml *.cfg *.crt *.key
-exclude .gitignore
+global-exclude .gitignore
prune build
diff --git a/aiosmtpd/smtp.py b/aiosmtpd/smtp.py
index b790785..dfdbcb5 100644
--- a/aiosmtpd/smtp.py
+++ b/aiosmtpd/smtp.py
@@ -201,12 +201,6 @@ class SMTP(asyncio.StreamReaderProtocol):
try:
line = await self._reader.readline()
log.debug('_handle_client readline: %s', line)
- except (ConnectionResetError, asyncio.CancelledError) as error:
- # The connection got reset during the DATA command.
- log.info('Connection lost during _handle_client()')
- self.connection_lost(error)
- return
- try:
# XXX this rstrip may not completely preserve old behavior.
line = line.rstrip(b'\r\n')
log.info('%r Data: %s', self.session.peer, line)
@@ -266,6 +260,13 @@ class SMTP(asyncio.StreamReaderProtocol):
'500 Error: command "%s" not recognized' % command)
continue
await method(arg)
+ except (ConnectionResetError, asyncio.CancelledError) as error:
+ # The connection got reset during the DATA command.
+ # XXX If handler method raises ConnectionResetError, we should
+ # verify that it was actually self._reader that was reset.
+ log.info('Connection lost during _handle_client()')
+ self.connection_lost(error)
+ return
except Exception as error:
try:
status = await self.handle_exception(error)
| CancelledError and misbehaving clients handling
I tried to write an RCPT handler that accepts connections from various embedded devices and forwards the email address to an external system. However, as the devices are not proper SMTP clients and their quality varies, I need the solution to be pretty robust. While developing it I discovered a problem with the way aiosmtpd deals with clients who send data and ignore the response. To reproduce:
sleep.py (simple handler class simulating my handler):
```
import asyncio
from aiosmtpd.handlers import Debugging
class RcptWebhook(Debugging):
async def handle_RCPT(self, server, session, envelope, address, rcpt_options):
await asyncio.sleep(10)
envelope.rcpt_tos.append(address)
return '250 OK'
```
Run aiosmtpd
python3 -m aiosmtpd -n -c sleep.RcptWebhook
smtp.txt (sample SMTP session)
```
HELO there
MAIL FROM: [email protected]
RCPT TO: [email protected]
DATA
test
.
```
(note the is no QUIT in the data)
Send it witch netcat
nc localhost 8025 < smtp.txt
It sends the content of the file and EOF which causes unhandled CanceledError in aiosmtpd while it is waiting for my sleep handler to finish:
```
INFO:mail.log:Server listening on localhost:8025
INFO:mail.log:Starting asyncio loop
INFO:mail.log:Peer: ('127.0.0.1', 39306)
INFO:mail.log:('127.0.0.1', 39306) handling connection
INFO:mail.log:('127.0.0.1', 39306) Data: b'HELO there'
INFO:mail.log:('127.0.0.1', 39306) Data: b'MAIL FROM: [email protected]'
INFO:mail.log:('127.0.0.1', 39306) sender: [email protected]
INFO:mail.log:('127.0.0.1', 39306) Data: b'RCPT TO: [email protected]'
INFO:mail.log:('127.0.0.1', 39306) EOF received
ERROR:mail.log:SMTP session exception
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/aiosmtpd/smtp.py", line 276, in _handle_client
yield from method(arg)
File "/usr/local/lib/python3.5/dist-packages/aiosmtpd/smtp.py", line 582, in smtp_RCPT
'RCPT', address, rcpt_options)
File "/usr/local/lib/python3.5/dist-packages/aiosmtpd/smtp.py", line 116, in _call_handler_hook
status = yield from hook(self, self.session, self.envelope, *args)
File "/home/vaclav/src/smtp-events-receiver/sleep.py", line 6, in handle_RCPT
await asyncio.sleep(10)
File "/usr/lib/python3.5/asyncio/tasks.py", line 516, in sleep
return (yield from future)
File "/usr/lib/python3.5/asyncio/futures.py", line 361, in __iter__
yield self # This tells Task to wait for completion.
File "/usr/lib/python3.5/asyncio/tasks.py", line 296, in _wakeup
future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 266, in result
raise CancelledError
concurrent.futures._base.CancelledError
INFO:mail.log:('127.0.0.1', 39306) Data: b'DATA'
INFO:mail.log:('127.0.0.1', 39306) Data: b'test'
INFO:mail.log:('127.0.0.1', 39306) Data: b'.'
INFO:mail.log:('127.0.0.1', 39306) Data: b''
INFO:mail.log:('127.0.0.1', 39306) Data: b''
INFO:mail.log:('127.0.0.1', 39306) Data: b''
INFO:mail.log:('127.0.0.1', 39306) Data: b''
...
```
aiosmtpd is now 100% busy on CPU and keeps printing "500 Error: bad syntax" to netcat. It ignores KeyboardInterrupt, and to quit it I need to terminate netcat with ^C, which stops aiosmtpd with "Connection lost".
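In the meantime, a handler can at least notice the cancellation itself instead of crashing — a rough sketch (the ten-second sleep stands in for the slow external call, as in the handler above):
```python
import asyncio

from aiosmtpd.handlers import Debugging


class RcptWebhook(Debugging):
    async def handle_RCPT(self, server, session, envelope, address, rcpt_options):
        try:
            await asyncio.sleep(10)  # stand-in for forwarding the address externally
        except asyncio.CancelledError:
            # The client hung up while we were still working: note it and let
            # the cancellation propagate instead of surfacing as an unhandled error.
            print('client disconnected during RCPT for %s' % address)
            raise
        envelope.rcpt_tos.append(address)
        return '250 OK'
```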
I think aiosmtpd should handle this more gracefully, as you don't want your server to get stuck on CPU because of a misbehaving client. I see in RFC 5321 that the server must not close the connection until a QUIT from the client, so I'm not sure what the proper way of handling EOF is, though. | aio-libs/aiosmtpd | diff --git a/aiosmtpd/tests/test_smtp.py b/aiosmtpd/tests/test_smtp.py
index 8a7ba7d..ab52cf6 100644
--- a/aiosmtpd/tests/test_smtp.py
+++ b/aiosmtpd/tests/test_smtp.py
@@ -124,6 +124,13 @@ class ErrorController(Controller):
return ErrorSMTP(self.handler)
+class SleepingHeloHandler:
+ async def handle_HELO(self, server, session, envelope, hostname):
+ await asyncio.sleep(0.01)
+ session.host_name = hostname
+ return '250 {}'.format(server.hostname)
+
+
class TestProtocol(unittest.TestCase):
def setUp(self):
self.transport = Mock()
@@ -1135,3 +1142,17 @@ Testing\xFF
""")
self.assertEqual(cm.exception.smtp_code, 500)
self.assertIn(b'Error: strict ASCII mode', cm.exception.smtp_error)
+
+
+class TestSleepingHandler(unittest.TestCase):
+ def setUp(self):
+ controller = NoDecodeController(SleepingHeloHandler())
+ controller.start()
+ self.addCleanup(controller.stop)
+ self.address = (controller.hostname, controller.port)
+
+ def test_close_after_helo(self):
+ with SMTP(*self.address) as client:
+ client.send('HELO example.com\r\n')
+ client.sock.shutdown(socket.SHUT_WR)
+ self.assertRaises(SMTPServerDisconnected, client.getreply)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/aio-libs/aiosmtpd.git@a046b08ac5a697718268c30394bcd45586074be0#egg=aiosmtpd
atpublic==2.3
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: aiosmtpd
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- atpublic==2.3
prefix: /opt/conda/envs/aiosmtpd
| [
"aiosmtpd/tests/test_smtp.py::TestSleepingHandler::test_close_after_helo"
]
| []
| [
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_empty_email",
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_honors_mail_delimeters",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_invalid_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_then_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_empty_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_expn",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_then_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_bad_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_data",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_noop",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_malformed",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_twice",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_malformed_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_missing_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_from",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_bad_syntax_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_unrecognized_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_address",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_to",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_bad_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_unknown_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_too_long_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_unknown_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_not_an_address",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_helo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_bad_encodings",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_dots_escaped",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_esmtp_no_size_limit",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_exception",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_undescribable",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_invalid_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_compatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_incompatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_size_too_large",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_unrequited_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_process_message_error",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_too_long_message_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_custom_response",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_unhandled",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_hostname",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_default_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_mail_invalid_body_param",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_command",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_data",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_DATA",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_command",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_bad_encoded_param",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_data",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_mail_param"
]
| []
| Apache License 2.0 | 1,399 | [
"aiosmtpd/smtp.py",
"MANIFEST.in"
]
| [
"aiosmtpd/smtp.py",
"MANIFEST.in"
]
|
|
Turbo87__utm-31 | 4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f | 2017-06-26 10:44:15 | 4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f | diff --git a/utm/conversion.py b/utm/conversion.py
old mode 100755
new mode 100644
index d21742a..449f3d1
--- a/utm/conversion.py
+++ b/utm/conversion.py
@@ -216,13 +216,13 @@ def latlon_to_zone_number(latitude, longitude):
return 32
if 72 <= latitude <= 84 and longitude >= 0:
- if longitude <= 9:
+ if longitude < 9:
return 31
- elif longitude <= 21:
+ elif longitude < 21:
return 33
- elif longitude <= 33:
+ elif longitude < 33:
return 35
- elif longitude <= 42:
+ elif longitude < 42:
return 37
return int((longitude + 180) / 6) + 1
| UTM zone exceptions error
By definition zones are left-closed, right-open intervals, e.g. zone 31: 0 <= longitude < 6.
In function latlon_to_zone_number:
```
if 72 <= latitude <= 84 and longitude >= 0:
if longitude <= 9:
return 31
elif longitude <= 21:
return 33
elif longitude <= 33:
return 35
elif longitude <= 42:
return 37
```
For latitudes >=72, this results in:
zone 31: 0 <= longitude <= 9
zone 33: 9 < longitude <= 21
zone 35: 21 < longitude <= 33
zone 37: 33 < longitude <= 42
but for latitudes < 72:
zone 37: 36 <= longitude < 42
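The off-by-one is easy to see from the public API (expected values follow the tests added in this change):
```python
import utm

# The zone number is the third element of the returned tuple.
print(utm.from_latlon(40, 6)[2])         # 32: at normal latitudes, 6 deg E already starts zone 32
print(utm.from_latlon(72, 8.999999)[2])  # 31: correct, still inside the widened zone 31
print(utm.from_latlon(72, 9)[2])         # 31 with the current <= comparisons, although
                                         # 9 deg E should already belong to zone 33
```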
| Turbo87/utm | diff --git a/test/test_utm.py b/test/test_utm.py
index 55686d7..c820cea 100755
--- a/test/test_utm.py
+++ b/test/test_utm.py
@@ -231,5 +231,22 @@ class Zone32V(unittest.TestCase):
self.assert_zone_equal(UTM.from_latlon(64, 12), 33, 'W')
+class TestRightBoundaries(unittest.TestCase):
+
+ def assert_zone_equal(self, result, expected_number):
+ self.assertEqual(result[2], expected_number)
+
+ def test_limits(self):
+ self.assert_zone_equal(UTM.from_latlon(40, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(40, 6), 32)
+
+ self.assert_zone_equal(UTM.from_latlon(72, 0), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 5.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 6), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 8.999999), 31)
+ self.assert_zone_equal(UTM.from_latlon(72, 9), 33)
+
+
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Turbo87/utm.git@4c7c13f2b2b9c01a8581392641aeb8bbda6aba6f#egg=utm
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: utm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/utm
| [
"test/test_utm.py::TestRightBoundaries::test_limits"
]
| []
| [
"test/test_utm.py::KnownValues::test_from_latlon",
"test/test_utm.py::KnownValues::test_to_latlon",
"test/test_utm.py::BadInput::test_from_latlon_range_checks",
"test/test_utm.py::BadInput::test_to_latlon_range_checks",
"test/test_utm.py::Zone32V::test_above",
"test/test_utm.py::Zone32V::test_below",
"test/test_utm.py::Zone32V::test_inside",
"test/test_utm.py::Zone32V::test_left_of",
"test/test_utm.py::Zone32V::test_right_of"
]
| []
| MIT License | 1,400 | [
"utm/conversion.py"
]
| [
"utm/conversion.py"
]
|
|
openmrslab__suspect-57 | f65efe543321e1d75d6ecbc7c22be73ab84d3d8a | 2017-06-26 15:26:22 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | diff --git a/suspect/mrsobjects.py b/suspect/mrsobjects.py
index 7fb4764..3785dc1 100644
--- a/suspect/mrsobjects.py
+++ b/suspect/mrsobjects.py
@@ -324,3 +324,46 @@ class MRSSpectrum(MRSBase):
suspect.adjust_frequency : equivalent function
"""
return self.fid().adjust_frequency(frequency_shift).spectrum()
+
+ def slice_hz(self, lower_bound, upper_bound):
+ """
+ Creates a slice object to access the region of the spectrum between
+ the specified bounds, in Hertz.
+
+ Parameters
+ ----------
+ lower_bound : float
+ The lower frequency bound of the region, in Hertz.
+ upper_bound : float
+ The upper frequency bound of the region, in Hertz.
+
+ Returns
+ -------
+ out : Slice
+ """
+ lower_index = numpy.floor((lower_bound + self.sw / 2) / self.df)
+ upper_index = numpy.ceil((upper_bound + self.sw / 2) / self.df)
+ if lower_index < 0:
+ raise ValueError("Could not create a slice for lower bound {}, value is outside range".format(lower_bound))
+ if upper_index < 0:
+ raise ValueError("Could not create a slice for upper bound {}, value is outside range".format(upper_bound))
+ return slice(int(lower_index), int(upper_index))
+
+ def slice_ppm(self, lower_bound, upper_bound):
+ """
+ Creates a slice object to access the region of the spectrum between
+ the specified bounds, in PPM.
+
+ Parameters
+ ----------
+ lower_bound : float
+ The lower frequency bound of the region, in PPM.
+ upper_bound : float
+ The upper frequency bound of the region, in PPM.
+
+ Returns
+ -------
+ out : Slice
+ """
+ return self.slice_hz(self.ppm_to_hertz(lower_bound),
+ self.ppm_to_hertz(upper_bound))
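A brief usage sketch of the two helpers added above (the array size, dwell time and frequency mirror the tests later in this record; treat it as illustrative only):
```python
import numpy
import suspect

# 1024 points, 1 ms dwell time, 123 MHz transmitter frequency
data = suspect.MRSData(numpy.ones(1024), 1e-3, 123)
spectrum = data.spectrum()

# Select the points between -100 Hz and 200 Hz ...
hz_region = spectrum[spectrum.slice_hz(-100, 200)]

# ... or between 3.7 and 5.7 ppm
ppm_region = spectrum[spectrum.slice_ppm(5.7, 3.7)]
```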
| Slice spectrum
It should be possible to generate a slice object for a specified frequency range from an MRSSpectrum object, which can be used to extract specific regions of a spectrum (or set of spectra). | openmrslab/suspect | diff --git a/tests/test_mrs/test_core.py b/tests/test_mrs/test_core.py
index 97a9f0e..2ea638e 100644
--- a/tests/test_mrs/test_core.py
+++ b/tests/test_mrs/test_core.py
@@ -1,6 +1,7 @@
import suspect
import numpy
+import pytest
def test_adjust_zero_phase():
@@ -15,3 +16,23 @@ def test_adjust_first_phase():
numpy.testing.assert_almost_equal(phased_data[0], -1j)
numpy.testing.assert_almost_equal(phased_data[-1], numpy.exp(1j * 0.4 * numpy.pi))
numpy.testing.assert_almost_equal(phased_data[5], 1)
+
+
+def test_slice_hz():
+ data = suspect.MRSData(numpy.ones(1024), 1e-3, 123)
+ spectrum = data.spectrum()
+ whole_slice = spectrum.slice_hz(-500, 500)
+ assert whole_slice == slice(0, 1024)
+ restricted_slice = spectrum.slice_hz(-100, 200)
+ assert restricted_slice == slice(409, 717)
+ with pytest.raises(ValueError):
+ too_large_slice = spectrum.slice_hz(-1000, 1000)
+
+
+def test_slice_ppm():
+ data = suspect.MRSData(numpy.ones(1000), 1e-3, 123)
+ spectrum = data.spectrum()
+ a_slice = spectrum.slice_ppm(5.7, 3.7)
+ assert a_slice == slice(377, 623)
+ reversed_slice = spectrum.slice_ppm(3.7, 5.7)
+ assert a_slice == slice(377, 623)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asteval==1.0.6
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
charset-normalizer==3.4.1
comm==0.2.2
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
dill==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
executing==2.2.0
fastjsonschema==2.21.1
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==8.18.1
jedi==0.19.2
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterlab_pygments==0.3.0
lmfit==1.3.3
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.1.3
mock==5.2.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nbsphinx==0.9.7
nest-asyncio==1.6.0
numpy==2.0.2
packaging==24.2
pandocfilters==1.5.1
Parsley==1.3
parso==0.8.4
pexpect==4.9.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pydicom==2.4.4
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
PyWavelets==1.6.0
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
-e git+https://github.com/openmrslab/suspect.git@f65efe543321e1d75d6ecbc7c22be73ab84d3d8a#egg=suspect
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
typing_extensions==4.13.0
uncertainties==3.2.2
urllib3==2.3.0
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.21.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asteval==1.0.6
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- comm==0.2.2
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- dill==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- executing==2.2.0
- fastjsonschema==2.21.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==8.18.1
- jedi==0.19.2
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyterlab-pygments==0.3.0
- lmfit==1.3.3
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mistune==3.1.3
- mock==5.2.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nbsphinx==0.9.7
- nest-asyncio==1.6.0
- numpy==2.0.2
- packaging==24.2
- pandocfilters==1.5.1
- parsley==1.3
- parso==0.8.4
- pexpect==4.9.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pydicom==2.4.4
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pywavelets==1.6.0
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- typing-extensions==4.13.0
- uncertainties==3.2.2
- urllib3==2.3.0
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.21.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_core.py::test_slice_hz",
"tests/test_mrs/test_core.py::test_slice_ppm"
]
| []
| [
"tests/test_mrs/test_core.py::test_adjust_zero_phase",
"tests/test_mrs/test_core.py::test_adjust_first_phase"
]
| []
| MIT License | 1,401 | [
"suspect/mrsobjects.py"
]
| [
"suspect/mrsobjects.py"
]
|
|
mjs__imapclient-244 | 9e82aa8e7fe0a8cd3b9b6318579a873c9a1bdde6 | 2017-06-26 19:38:01 | 2abdac690fa653fa2d0d55b7617be24101597698 | mjs: OAUTH1 support was removed in #218 . Would you mind rebasing so this PR just has the `Address` fix? | diff --git a/imapclient/response_types.py b/imapclient/response_types.py
index ea5d71d..c35dd00 100644
--- a/imapclient/response_types.py
+++ b/imapclient/response_types.py
@@ -80,9 +80,12 @@ class Address(namedtuple("Address", "name route mailbox host")):
"""
def __str__(self):
- return formataddr((
- to_unicode(self.name),
- to_unicode(self.mailbox) + '@' + to_unicode(self.host)))
+ if self.mailbox and self.host:
+ address = to_unicode(self.mailbox) + '@' + to_unicode(self.host)
+ else:
+ address = to_unicode(self.mailbox or self.host)
+
+ return formataddr((to_unicode(self.name), address))
class SearchIds(list):
| Avoid TypeError when using `str` on Address tuple
Some emails have no mailbox or host (e.g. `undisclosed-recipients` case), so when parsing the ENVELOPE of the message using imapclient, we can get something like this:
```
In [8]: from imapclient.response_types import *
In [9]: addr = Address('Anonymous', None, None, 'undisclosed-recipients')
In [10]: str(addr)
---------------------------------------------------------------------------
.../lib/python3.5/site-packages/imapclient/response_types.py in __str__(self)
57 return formataddr((
58 to_unicode(self.name),
---> 59 to_unicode(self.mailbox) + '@' + to_unicode(self.host)))
60
61
TypeError: unsupported operand type(s) for +: 'NoneType' and 'str'
```
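For reference, with the fallback from the patch above in place the same kind of values format cleanly (the expected strings mirror the tests added in this change):
```python
from imapclient.response_types import Address

# host missing: fall back to the mailbox part instead of raising TypeError
print(str(Address("Anonymous", None, "undisclosed-recipients", None)))
# Anonymous <undisclosed-recipients>

# mailbox missing: fall back to the host part
print(str(Address(None, None, None, "undisclosed-recipients")))
# undisclosed-recipients
```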
I think the `__str__` method should handle this and just return `self.mailbox` or `self.host` if the other part is missing. I could write the PR, but I would prefer to have thoughts about this first. | mjs/imapclient | diff --git a/imapclient/test/test_response_parser.py b/imapclient/test/test_response_parser.py
index 3c13534..111188b 100644
--- a/imapclient/test/test_response_parser.py
+++ b/imapclient/test/test_response_parser.py
@@ -491,6 +491,12 @@ class TestParseFetchResponse(unittest.TestCase):
self.assertEqual(str(Address("Mary Jane", None, "mary", "jane.org")),
"Mary Jane <[email protected]>")
+ self.assertEqual(str(Address("Anonymous", None, "undisclosed-recipients", None)),
+ "Anonymous <undisclosed-recipients>")
+
+ self.assertEqual(str(Address(None, None, None, "undisclosed-recipients")),
+ "undisclosed-recipients")
+
def add_crlf(text):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
-e git+https://github.com/mjs/imapclient.git@9e82aa8e7fe0a8cd3b9b6318579a873c9a1bdde6#egg=IMAPClient
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: imapclient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/imapclient
| [
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_Address_str"
]
| []
| [
"imapclient/test/test_response_parser.py::TestParseResponse::test_bad_literal",
"imapclient/test/test_response_parser.py::TestParseResponse::test_bad_quoting",
"imapclient/test/test_response_parser.py::TestParseResponse::test_complex_mixed",
"imapclient/test/test_response_parser.py::TestParseResponse::test_deeper_nest_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_empty_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_envelopey",
"imapclient/test/test_response_parser.py::TestParseResponse::test_envelopey_quoted",
"imapclient/test/test_response_parser.py::TestParseResponse::test_incomplete_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_int",
"imapclient/test/test_response_parser.py::TestParseResponse::test_int_and_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_literal",
"imapclient/test/test_response_parser.py::TestParseResponse::test_literal_with_more",
"imapclient/test/test_response_parser.py::TestParseResponse::test_nested_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_nil",
"imapclient/test/test_response_parser.py::TestParseResponse::test_quoted_specials",
"imapclient/test/test_response_parser.py::TestParseResponse::test_square_brackets",
"imapclient/test/test_response_parser.py::TestParseResponse::test_string",
"imapclient/test/test_response_parser.py::TestParseResponse::test_tuple",
"imapclient/test/test_response_parser.py::TestParseResponse::test_unquoted",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_basic",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq_interleaved",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_modseq_no_space",
"imapclient/test/test_response_parser.py::TestParseMessageList::test_one_id",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODY",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODYSTRUCTURE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_BODY_HEADER_FIELDS",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_empty_addresses",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_invalid_date",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_ENVELOPE_with_no_date",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_FLAGS",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_INTERNALDATE_normalised",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_UID",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_UID",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_data",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_bad_msgid",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_basic",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_literals",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_literals_and_keys_with_square_brackets",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_missing_data",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_mixed_types",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_multiple_messages",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_none_special_case",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_not_uid_is_key",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_odd_pairs",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_partial_fetch",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_same_message_appearing_multiple_times",
"imapclient/test/test_response_parser.py::TestParseFetchResponse::test_simple_pairs"
]
| []
| BSD License | 1,402 | [
"imapclient/response_types.py"
]
| [
"imapclient/response_types.py"
]
|
streamlink__streamlink-1039 | 13abffa6f327c43fd67b1741a6d6a6027bccdcc1 | 2017-06-26 20:04:09 | 13abffa6f327c43fd67b1741a6d6a6027bccdcc1 | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 08cddb4c..32487f7b 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,3 +1,114 @@
+streamlink 0.7.0 (2017-06-30)
+-----------------------------
+0.7.0 of Streamlink!
+
+Since our May release, we've incorporated quite a few changes!
+
+Outlined are the major features in this month's release:
+
+ - Stream types will now be sorted accordingly in terms of quality
+ - TeamLiquid.net Plugin added
+ - Numerous plugin & bug fixes
+ - Updated HomeBrew package
+ - Improved CLI documentation
+
+Many thanks to those who've contributed in this release!
+
+If you think that this application is helpful, please consider supporting the maintainers by [donating](https://streamlink.github.io/donate.html).
+
+
+::
+
+ Alex Shafer <[email protected]> (1):
+ Return sorted list of streams. (#731)
+
+ Alexandre Hitchcox <[email protected]> (1):
+ Allow live channel links without '/c/' prefix
+
+ Alexis Murzeau <[email protected]> (1):
+ docs: fix typo: specifiying, neverthless
+
+ CatKasha <[email protected]> (1):
+ Add MPC-HC x64 in streamlinkrc
+
+ Forrest <[email protected]> (1):
+ Add a few more examples to the player option (#896)
+
+ Jacob Malmberg <[email protected]> (3):
+ Here's the plugin I wrote for teamliquid.net (w/ some help from https://github.com/back-to)
+ Tests for teamliquid plugin
+ Now with RE!
+
+ Mohamed El Morabity <[email protected]> (9):
+ Update for live API changes
+ Add unit tests for Euronews plugin
+ Drop pcyourfreetv plugin
+ Add support for regional France 3 streams
+ Add support for TV5Monde
+ PEP8
+ Add support for VOD/audio streams
+ Add support for radio.net
+ Ignore unreliable stream status returned by radio.net
+
+ Sebastian Meyer <[email protected]> (1):
+ Homebrew package (#929)
+
+ back-to <[email protected]> (2):
+ [dailymotion] fix for broken .f4m file that is a .m3u8 file (only livestreams)
+ [arte] vod api url update & add new/missing languages
+
+ bastimeyer <[email protected]> (2):
+ docs: fix parameters being linked in code blocks
+ Improve CLI documentation
+
+ beardypig <[email protected]> (1):
+ plugins.hitbox: add support for smashcast.tv
+
+ beardypig <[email protected]> (21):
+ plugins.bbciplayer: update to reflect slight site layout change
+ plugins.bbciplayer: add option to login to a bbc account
+ http_server: handle socket closed exception for Python 2.7
+ docs: update Sphinx config to fix the rendering of --
+ docs: pin sphinx to 1.6.+ so that no future changes affect the docs
+ plugins.tvplayer: fix bug with some channels not loading
+ plugins.hitbox: fix new VOD urls, and add support for hosted streams
+ plugins.tvplayer: fix bug with some channels when not authenticated
+ setup: exclude requests version 2.16 through 2.17.1
+ win32: fix missing modules when using windows installer
+ bbciplayer: fix for api changes to iplayer
+ tvplayer: updated to match change token parameter name
+ plugins.looch: support for live and vod streams on looch.tv
+ plugins.webtv: decrypt the stream URL when applicable
+ plugins.dogan: small api change for teve2.com.tr
+ plugins.kanal7: fix for nested iframes
+ win32: update the dependencies for the windows installer
+ plugins.canlitv: simplified and fixed the m3u8 regex
+ plugins.picarto: support for VOD
+ plugins.ine: update to extract the relocated jwplayer config
+ plugin.ufctv: support for free and premium vod/live streams
+
+ cirrus <[email protected]> (3):
+ Create arconia.py
+ Rename arconia.py to arconai.py
+ Create plugin_matrix.rst
+
+ steven7851 <[email protected]> (4):
+ plugins.app17: fix hls url and support UID page
+ little change
+ plugins.app17: change ROOM_URL
+ [douyu] temporary fix by revert to previously commit (#1015)
+
+ whizzoo <[email protected]> (2):
+ Restore support for RTL XL
+ plugin.rtlxl: Remove spaces from line 14
+
+ yhel <[email protected]> (1):
+ Don't return an error when the stream is offline
+
+ yhel <[email protected]> (1):
+ Add capability of extracting current sport.francetv stream
+
+
streamlink 0.6.0 (2017-05-11)
-----------------------------
Another release of Streamlink!
diff --git a/docs/plugin_matrix.rst b/docs/plugin_matrix.rst
index b2e12936..d8f676f7 100644
--- a/docs/plugin_matrix.rst
+++ b/docs/plugin_matrix.rst
@@ -154,18 +154,18 @@ playtv playtv.fr Yes -- Streams may be geo-restrict
pluzz - france.tv Yes Yes Streams may be geo-restricted to France, Andorra and Monaco.
- ludo.fr
- zouzous.fr
- - france3-reg.. [8]_
+ - france3-reg.. [8]_
powerapp powerapp.com.tr Yes No
radionet - radio.net Yes --
- radio.at
- - radio.de
- - radio.dk
- - radio.es
- - radio.fr
- - radio.it
- - radio.pl
- - radio.pt
- - radio.se
+ - radio.de
+ - radio.dk
+ - radio.es
+ - radio.fr
+ - radio.it
+ - radio.pl
+ - radio.pt
+ - radio.se
raiplay raiplay.it Yes No Most streams are geo-restricted to Italy.
rtlxl rtlxl.nl No Yes Streams may be geo-restricted to The Netherlands. Livestreams not supported.
rte rte.ie/player Yes Yes
@@ -261,6 +261,9 @@ wwenetwork network.wwe.com Yes Yes Requires an account to acce
younow younow.com Yes --
youtube - youtube.com Yes Yes Protected videos are not supported.
- youtu.be
+zattoo - zattoo.com Yes Yes
+ - nettv.net... [10]_
+ - tvonline.ewe.de
zdf_mediathek zdf.de Yes Yes Streams may be geo-restricted to Germany.
zhanqitv zhanqi.tv Yes No
=================== ==================== ===== ===== ===========================
@@ -275,3 +278,4 @@ zhanqitv zhanqi.tv Yes No
.. [7] player.theplatform.com
.. [8] france3-regions.francetvinfo.fr
.. [9] tv5mondeplusafrique.com
+.. [10] nettv.netcologne.de
diff --git a/setup.py b/setup.py
index a5f9bc86..a79b1c52 100644
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@ if version_info[0] == 2:
if version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):
deps.append("singledispatch")
-deps.append("requests>=2.2,!=2.12.0,!=2.12.1,<3.0")
+deps.append("requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0")
# for encrypted streams
if environ.get("STREAMLINK_USE_PYCRYPTO"):
@@ -53,7 +53,7 @@ srcdir = join(dirname(abspath(__file__)), "src/")
sys_path.insert(0, srcdir)
setup(name="streamlink",
- version="0.6.0",
+ version="0.7.0",
description="Streamlink is command-line utility that extracts streams "
"from various services and pipes them into a video player of "
"choice.",
diff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py
index 28dbb515..555b0cad 100644
--- a/src/streamlink/__init__.py
+++ b/src/streamlink/__init__.py
@@ -22,7 +22,7 @@ if version_info[:2] == (2, 6):
)
__title__ = "streamlink"
-__version__ = "0.6.0"
+__version__ = "0.7.0"
__license__ = "Simplified BSD"
__author__ = "Streamlink"
__copyright__ = "Copyright 2016 Streamlink"
diff --git a/src/streamlink/plugin/plugin.py b/src/streamlink/plugin/plugin.py
index 2721dab0..381275cd 100644
--- a/src/streamlink/plugin/plugin.py
+++ b/src/streamlink/plugin/plugin.py
@@ -1,5 +1,7 @@
import operator
import re
+from collections import OrderedDict
+
from functools import partial
from ..cache import Cache
@@ -11,6 +13,8 @@ from ..options import Options
# Someone who knows math, please fix.
BIT_RATE_WEIGHT_RATIO = 2.8
+ALT_WEIGHT_MOD = 0.01
+
QUALITY_WEIGTHS_EXTRA = {
"other": {
"live": 1080,
@@ -40,18 +44,26 @@ def stream_weight(stream):
if stream in weights:
return weights[stream], group
- match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?$", stream)
+ match = re.match(r"^(\d+)(k|p)?(\d+)?(\+)?(?:_(\d+)k)?(?:_(alt)(\d)?)?$", stream)
if match:
+ weight = 0
+
+ if match.group(6):
+ if match.group(7):
+ weight -= ALT_WEIGHT_MOD * int(match.group(7))
+ else:
+ weight -= ALT_WEIGHT_MOD
+
name_type = match.group(2)
if name_type == "k": # bit rate
bitrate = int(match.group(1))
- weight = bitrate / BIT_RATE_WEIGHT_RATIO
+ weight += bitrate / BIT_RATE_WEIGHT_RATIO
return weight, "bitrate"
elif name_type == "p": # resolution
- weight = int(match.group(1))
+ weight += int(match.group(1))
if match.group(3): # fps eg. 60p or 50p
weight += int(match.group(3))
@@ -310,13 +322,18 @@ class Plugin(object):
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
+ final_sorted_streams = OrderedDict()
+
+ for stream_name in sorted(streams, key=stream_weight_only):
+ final_sorted_streams[stream_name] = streams[stream_name]
+
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
- streams["best"] = streams[best]
- streams["worst"] = streams[worst]
-
- return streams
+ final_sorted_streams["worst"] = streams[worst]
+ final_sorted_streams["best"] = streams[best]
+
+ return final_sorted_streams
def get_streams(self, *args, **kwargs):
"""Deprecated since version 1.9.0.
diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py
index 96c048c3..d5360b7c 100644
--- a/src/streamlink/plugins/artetv.py
+++ b/src/streamlink/plugins/artetv.py
@@ -12,7 +12,7 @@ from streamlink.stream import HDSStream
from streamlink.stream import HLSStream
from streamlink.stream import HTTPStream
-JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{0}/{1}"
+JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT"
JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{0}"
_url_re = re.compile(r"""
@@ -53,13 +53,13 @@ class ArteTV(Plugin):
stream_language = stream["versionShortLibelle"]
if language == "de":
- language = ["DE", "VOST-DE", "VA", "VOA", "Dt. Live"]
+ language = ["DE", "VOST-DE", "VA", "VOA", "Dt. Live", "OV", "OmU"]
elif language == "en":
language = ["ANG", "VOST-ANG"]
elif language == "es":
language = ["ESP", "VOST-ESP"]
elif language == "fr":
- language = ["FR", "VOST-FR", "VF", "VOF", "Frz. Live"]
+ language = ["FR", "VOST-FR", "VF", "VOF", "Frz. Live", "VO", "ST mal"]
elif language == "pl":
language = ["POL", "VOST-POL"]
diff --git a/src/streamlink/plugins/teamliquid.py b/src/streamlink/plugins/teamliquid.py
new file mode 100755
index 00000000..66d9f11c
--- /dev/null
+++ b/src/streamlink/plugins/teamliquid.py
@@ -0,0 +1,26 @@
+import re
+
+from streamlink.plugin import Plugin
+from streamlink.plugin.api import http
+
+
+_url_re = re.compile(r'''https?://(?:www\.)?teamliquid\.net/video/streams/''')
+
+
+class Teamliquid(Plugin):
+ @classmethod
+ def can_handle_url(cls, url):
+ return _url_re.match(url)
+
+ def _get_streams(self):
+ res = http.get(self.url)
+
+ stream_address_re = re.compile(r'''href\s*=\s*"([^"]+)"\s*>\s*View on''')
+
+ stream_url_match = stream_address_re.search(res.text)
+ if stream_url_match:
+ stream_url = stream_url_match.group(1)
+ self.logger.info("Attempting to play streams from {0}", stream_url)
+ return self.session.streams(stream_url)
+
+__plugin__ = Teamliquid
diff --git a/src/streamlink/plugins/zattoo.py b/src/streamlink/plugins/zattoo.py
new file mode 100644
index 00000000..966500eb
--- /dev/null
+++ b/src/streamlink/plugins/zattoo.py
@@ -0,0 +1,238 @@
+import re
+import time
+import uuid
+
+from streamlink.cache import Cache
+from streamlink.plugin import Plugin
+from streamlink.plugin import PluginOptions
+from streamlink.plugin.api import http
+from streamlink.plugin.api import useragents
+from streamlink.plugin.api import validate
+from streamlink.stream import HLSStream
+
+
+class Zattoo(Plugin):
+ API_HELLO = '{0}/zapi/session/hello'
+ API_LOGIN = '{0}/zapi/v2/account/login'
+ API_CHANNELS = '{0}/zapi/v2/cached/channels/{1}?details=False'
+ API_WATCH = '{0}/zapi/watch'
+ API_WATCH_VOD = '{0}/zapi/avod/videos/{1}/watch'
+
+ _url_re = re.compile(r'''
+ https?://
+ (?P<base_url>
+ zattoo\.com
+ |
+ tvonline\.ewe\.de
+ |
+ nettv\.netcologne\.de
+ )/(?:watch/(?P<channel>[^/\s]+)
+ |
+ ondemand/watch/(?P<vod_id>[^-]+)-)
+ ''', re.VERBOSE)
+
+ _app_token_re = re.compile(r"""window\.appToken\s+=\s+'([^']+)'""")
+
+ _channels_schema = validate.Schema({
+ 'success': int,
+ 'channel_groups': [{
+ 'channels': [
+ {
+ 'display_alias': validate.text,
+ 'cid': validate.text
+ },
+ ]
+ }]},
+ validate.get('channel_groups'),
+ )
+
+ options = PluginOptions({
+ 'email': None,
+ 'password': None,
+ 'purge_credentials': None
+ })
+
+ def __init__(self, url):
+ super(Zattoo, self).__init__(url)
+ self._session_attributes = Cache(filename='plugin-cache.json', key_prefix='zattoo:attributes')
+ self._authed = self._session_attributes.get('beaker.session.id') and self._session_attributes.get('pzuid') and self._session_attributes.get('power_guide_hash')
+ self._uuid = self._session_attributes.get('uuid')
+ self._expires = self._session_attributes.get('expires')
+
+ self.base_url = 'https://{0}'.format(Zattoo._url_re.match(url).group('base_url'))
+ self.headers = {
+ 'User-Agent': useragents.CHROME,
+ 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
+ 'X-Requested-With': 'XMLHttpRequest',
+ 'Referer': self.base_url
+ }
+
+ @classmethod
+ def can_handle_url(cls, url):
+ return Zattoo._url_re.match(url)
+
+ def _hello(self):
+ self.logger.debug('_hello ...')
+ res = http.get(self.base_url)
+ match = self._app_token_re.search(res.text)
+
+ app_token = match.group(1)
+ hello_url = self.API_HELLO.format(self.base_url)
+
+ if self._uuid:
+ __uuid = self._uuid
+ else:
+ __uuid = str(uuid.uuid4())
+ self._session_attributes.set('uuid', __uuid, expires=3600 * 24)
+
+ params = {
+ 'client_app_token': app_token,
+ 'uuid': __uuid,
+ 'lang': 'en',
+ 'format': 'json'
+ }
+ res = http.post(hello_url, headers=self.headers, data=params)
+ return res
+
+ def _login(self, email, password, _hello):
+ self.logger.debug('_login ... Attempting login as {0}'.format(email))
+
+ login_url = self.API_LOGIN.format(self.base_url)
+
+ params = {
+ 'login': email,
+ 'password': password,
+ 'remember': 'true'
+ }
+
+ res = http.post(login_url, headers=self.headers, data=params, cookies=_hello.cookies)
+ data = http.json(res)
+
+ self._authed = data['success']
+ if self._authed:
+ self.logger.debug('New Session Data')
+ self._session_attributes.set('beaker.session.id', res.cookies.get('beaker.session.id'), expires=3600 * 24)
+ self._session_attributes.set('pzuid', res.cookies.get('pzuid'), expires=3600 * 24)
+ self._session_attributes.set('power_guide_hash', data['session']['power_guide_hash'], expires=3600 * 24)
+ return self._authed
+ else:
+ return None
+
+ def _watch(self):
+ self.logger.debug('_watch ...')
+ match = self._url_re.match(self.url)
+ if not match:
+ return
+ channel = match.group('channel')
+ vod_id = match.group('vod_id')
+
+ cookies = {
+ 'beaker.session.id': self._session_attributes.get('beaker.session.id'),
+ 'pzuid': self._session_attributes.get('pzuid')
+ }
+
+ watch_url = []
+ if channel:
+ params, watch_url = self._watch_live(channel, cookies)
+ elif vod_id:
+ params, watch_url = self._watch_vod(vod_id)
+
+ if not watch_url:
+ return
+
+ res = []
+ try:
+ res = http.post(watch_url, headers=self.headers, data=params, cookies=cookies)
+ except Exception as e:
+ if '404 Client Error' in str(e):
+ self.logger.error('Unfortunately streaming is not permitted in this country or this channel does not exist.')
+ elif '402 Client Error: Payment Required' in str(e):
+ self.logger.error('Paid subscription required for this channel.')
+ self.logger.info('If paid subscription exist, use --zattoo-purge-credentials to start a new session.')
+ else:
+ self.logger.error(str(e))
+ return
+
+ data = http.json(res)
+
+ if data['success']:
+ for hls_url in data['stream']['watch_urls']:
+ for s in HLSStream.parse_variant_playlist(self.session, hls_url['url']).items():
+ yield s
+
+ def _watch_live(self, channel, cookies):
+ self.logger.debug('_watch_live ... Channel: {0}'.format(channel))
+ watch_url = self.API_WATCH.format(self.base_url)
+
+ channels_url = self.API_CHANNELS.format(self.base_url, self._session_attributes.get('power_guide_hash'))
+ res = http.get(channels_url, headers=self.headers, cookies=cookies)
+ data = http.json(res, schema=self._channels_schema)
+
+ c_list = []
+ for d in data:
+ for c in d['channels']:
+ c_list.append(c)
+
+ cid = []
+ zattoo_list = []
+ for c in c_list:
+ zattoo_list.append(c['display_alias'])
+ if c['display_alias'] == channel:
+ cid = c['cid']
+
+ self.logger.debug('Available zattoo channels in this country: {0}'.format(', '.join(sorted(zattoo_list))))
+
+ if not cid:
+ cid = channel
+
+ self.logger.debug('CHANNEL ID: {0}'.format(cid))
+
+ params = {
+ 'cid': cid,
+ 'https_watch_urls': True,
+ 'stream_type': 'hls'
+ }
+ return params, watch_url
+
+ def _watch_vod(self, vod_id):
+ self.logger.debug('_watch_vod ...')
+ watch_url = self.API_WATCH_VOD.format(self.base_url, vod_id)
+ params = {
+ 'https_watch_urls': True,
+ 'stream_type': 'hls'
+ }
+ return params, watch_url
+
+ def _get_streams(self):
+ email = self.get_option('email')
+ password = self.get_option('password')
+
+ if self.options.get('purge_credentials'):
+ self._session_attributes.set('beaker.session.id', None, expires=0)
+ self._session_attributes.set('expires', None, expires=0)
+ self._session_attributes.set('power_guide_hash', None, expires=0)
+ self._session_attributes.set('pzuid', None, expires=0)
+ self._session_attributes.set('uuid', None, expires=0)
+ self._authed = False
+ self.logger.info('All credentials were successfully removed.')
+
+ if not self._authed and (not email and not password):
+ self.logger.error('A login for Zattoo is required, use --zattoo-email EMAIL --zattoo-password PASSWORD to set them')
+ return
+
+ if self._authed:
+ if self._expires < time.time():
+ # login after 24h
+ expires = time.time() + 3600 * 24
+ self._session_attributes.set('expires', expires, expires=3600 * 24)
+ self._authed = False
+
+ if not self._authed:
+ __hello = self._hello()
+ if not self._login(email, password, __hello):
+ self.logger.error('Failed to login, check your username/password')
+ return
+
+ return self._watch()
+
+__plugin__ = Zattoo
diff --git a/src/streamlink_cli/argparser.py b/src/streamlink_cli/argparser.py
index 2628b15b..1c96a4e9 100644
--- a/src/streamlink_cli/argparser.py
+++ b/src/streamlink_cli/argparser.py
@@ -1285,6 +1285,28 @@ plugin.add_argument(
A ufc.tv account password to use with --ufctv-username.
"""
)
+plugin.add_argument(
+ "--zattoo-email",
+ metavar="EMAIL",
+ help="""
+ The email associated with your zattoo account, required to access any zattoo stream.
+ """
+)
+plugin.add_argument(
+ "--zattoo-password",
+ metavar="PASSWORD",
+ help="""
+ A zattoo account password to use with --zattoo-email.
+ """
+)
+plugin.add_argument(
+ "--zattoo-purge-credentials",
+ action="store_true",
+ help="""
+ Purge cached zattoo credentials to initiate a new session
+ and reauthenticate.
+ """
+)
# Deprecated options
stream.add_argument(
diff --git a/src/streamlink_cli/main.py b/src/streamlink_cli/main.py
index 24195591..113bb38b 100644
--- a/src/streamlink_cli/main.py
+++ b/src/streamlink_cli/main.py
@@ -941,6 +941,19 @@ def setup_plugin_options():
if ufctv_password:
streamlink.set_plugin_option("ufctv", "password", ufctv_password)
+ if args.zattoo_email:
+ streamlink.set_plugin_option("zattoo", "email", args.zattoo_email)
+ if args.zattoo_email and not args.zattoo_password:
+ zattoo_password = console.askpass("Enter zattoo password: ")
+ else:
+ zattoo_password = args.zattoo_password
+ if zattoo_password:
+ streamlink.set_plugin_option("zattoo", "password", zattoo_password)
+
+ if args.zattoo_purge_credentials:
+ streamlink.set_plugin_option("zattoo", "purge_credentials",
+ args.zattoo_purge_credentials)
+
# Deprecated options
if args.jtv_legacy_names:
console.logger.warning("The option --jtv/twitch-legacy-names is "
| Plugin for zattoo.com
### Checklist
- [ ] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Could you please add a plugin for https://zattoo.com, a livestreaming platform on which many TV stations are streamed? There are about 70 channels in the free version and about 90 channels in the premium version.
Sorry for any grammatical mistakes, and thanks a lot.
... | streamlink/streamlink | diff --git a/tests/test_plugin_teamliquid.py b/tests/test_plugin_teamliquid.py
new file mode 100644
index 00000000..c2ea01bb
--- /dev/null
+++ b/tests/test_plugin_teamliquid.py
@@ -0,0 +1,19 @@
+import unittest
+
+from streamlink.plugins.teamliquid import Teamliquid
+
+
+class TestPluginTeamliquid(unittest.TestCase):
+ def test_can_handle_url(self):
+ # should match
+ self.assertTrue(Teamliquid.can_handle_url("http://www.teamliquid.net/video/streams/Classic%20BW%20VODs"))
+ self.assertTrue(Teamliquid.can_handle_url("http://teamliquid.net/video/streams/iwl-fuNny"))
+ self.assertTrue(Teamliquid.can_handle_url("http://www.teamliquid.net/video/streams/OGamingTV%20SC2"))
+ self.assertTrue(Teamliquid.can_handle_url("http://www.teamliquid.net/video/streams/Check"))
+
+ # shouldn't match
+ self.assertFalse(Teamliquid.can_handle_url("http://www.teamliquid.net/Classic%20BW%20VODs"))
+ self.assertFalse(Teamliquid.can_handle_url("http://www.teamliquid.net/video/Check"))
+ self.assertFalse(Teamliquid.can_handle_url("http://www.teamliquid.com/video/streams/Check"))
+ self.assertFalse(Teamliquid.can_handle_url("http://www.teamliquid.net/video/stream/Check"))
+
diff --git a/tests/test_plugin_zattoo.py b/tests/test_plugin_zattoo.py
new file mode 100644
index 00000000..0ad0dfcd
--- /dev/null
+++ b/tests/test_plugin_zattoo.py
@@ -0,0 +1,24 @@
+import unittest
+
+from streamlink.plugins.zattoo import Zattoo
+
+
+class TestPluginZattoo(unittest.TestCase):
+ def test_can_handle_url(self):
+ # ewe live
+ self.assertTrue(Zattoo.can_handle_url('http://tvonline.ewe.de/watch/daserste'))
+ self.assertTrue(Zattoo.can_handle_url('http://tvonline.ewe.de/watch/zdf'))
+ # netcologne live
+ self.assertTrue(Zattoo.can_handle_url('https://nettv.netcologne.de/watch/daserste'))
+ self.assertTrue(Zattoo.can_handle_url('https://nettv.netcologne.de/watch/zdf'))
+ # zattoo live
+ self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/watch/daserste'))
+ self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/watch/zdf'))
+ # zattoo vod
+ self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/ondemand/watch/ibR2fpisWFZGvmPBRaKnFnuT-alarm-am-airport'))
+ self.assertTrue(Zattoo.can_handle_url('https://zattoo.com/ondemand/watch/G8S7JxcewY2jEwAgMzvFWK8c-berliner-schnauzen'))
+
+ # shouldn't match
+ self.assertFalse(Zattoo.can_handle_url('https://ewe.de'))
+ self.assertFalse(Zattoo.can_handle_url('https://netcologne.de'))
+ self.assertFalse(Zattoo.can_handle_url('https://zattoo.com'))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 8
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pip",
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
iso-639==0.4.5
iso3166==2.1.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycryptodome==3.21.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
-e git+https://github.com/streamlink/streamlink.git@13abffa6f327c43fd67b1741a6d6a6027bccdcc1#egg=streamlink
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso-639==0.4.5
- iso3166==2.1.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycryptodome==3.21.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/streamlink
| [
"tests/test_plugin_teamliquid.py::TestPluginTeamliquid::test_can_handle_url",
"tests/test_plugin_zattoo.py::TestPluginZattoo::test_can_handle_url"
]
| []
| []
| []
| BSD 2-Clause "Simplified" License | 1,403 | [
"src/streamlink/plugins/artetv.py",
"src/streamlink_cli/main.py",
"setup.py",
"docs/plugin_matrix.rst",
"src/streamlink/plugins/teamliquid.py",
"CHANGELOG.rst",
"src/streamlink/__init__.py",
"src/streamlink_cli/argparser.py",
"src/streamlink/plugins/zattoo.py",
"src/streamlink/plugin/plugin.py"
]
| [
"src/streamlink/plugins/artetv.py",
"src/streamlink_cli/main.py",
"setup.py",
"docs/plugin_matrix.rst",
"src/streamlink/plugins/teamliquid.py",
"CHANGELOG.rst",
"src/streamlink/__init__.py",
"src/streamlink_cli/argparser.py",
"src/streamlink/plugins/zattoo.py",
"src/streamlink/plugin/plugin.py"
]
|
|
networkx__networkx-2488 | 22a6ebaf0c235a825195e48558f39b65c26d5a1c | 2017-06-26 23:27:26 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | dschult: Let's go ahead and make the relabel files pep8 compliant. Our general philosophy is to improve pep8 stuff only when we touch code for other reasons. Sometimes that means only part of a file and sometimes it means the whole file. The key is to make it so the pep8 changes don't swamp the actual changes and they won't here. I don't change the tests for pep8 very often because it feels like I make the readability worse by adding so many line breaks to keep the code within 80 chars per line. But that's personal preference and depends on the test code your looking at. I'll leave that to you.
Thanks!
jarrodmillman: This is ready to merge.
I rebased on master and made a separate commit for pep8 changes outside of the functions I modified. I didn't make any new code wraps and took out an existing one in `networkx/tests/test_relabel.py` that I thought was unhelpful in terms of readability.
jarrodmillman: @dschult Ready for merge. | diff --git a/networkx/classes/digraph.py b/networkx/classes/digraph.py
index 73a59c30c..2c84b9d35 100644
--- a/networkx/classes/digraph.py
+++ b/networkx/classes/digraph.py
@@ -1124,7 +1124,8 @@ class DiGraph(Graph):
H.add_edges_from((v, u, deepcopy(d)) for u, v, d
in self.edges(data=True))
H.graph = deepcopy(self.graph)
- H._node = deepcopy(self._node)
+ for n in self._node:
+ H._node[n] = deepcopy(self._node[n])
else:
self._pred, self._succ = self._succ, self._pred
self._adj = self._succ
diff --git a/networkx/relabel.py b/networkx/relabel.py
index 569ba2c02..9cac3f169 100644
--- a/networkx/relabel.py
+++ b/networkx/relabel.py
@@ -6,10 +6,11 @@
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
- 'Pieter Swart ([email protected])',
- 'Dan Schult ([email protected])'])
+ 'Pieter Swart ([email protected])',
+ 'Dan Schult ([email protected])'])
__all__ = ['convert_node_labels_to_integers', 'relabel_nodes']
+
def relabel_nodes(G, mapping, copy=True):
"""Relabel the nodes of the graph G.
@@ -76,21 +77,21 @@ def relabel_nodes(G, mapping, copy=True):
Only the nodes specified in the mapping will be relabeled.
The keyword setting copy=False modifies the graph in place.
- Relabel_nodes avoids naming collisions by building a
+ Relabel_nodes avoids naming collisions by building a
directed graph from ``mapping`` which specifies the order of
relabelings. Naming collisions, such as a->b, b->c, are ordered
such that "b" gets renamed to "c" before "a" gets renamed "b".
- In cases of circular mappings (e.g. a->b, b->a), modifying the
+ In cases of circular mappings (e.g. a->b, b->a), modifying the
graph is not possible in-place and an exception is raised.
In that case, use copy=True.
-
+
See Also
--------
convert_node_labels_to_integers
"""
# you can pass a function f(old_label)->new_label
# but we'll just make a dictionary here regardless
- if not hasattr(mapping,"__getitem__"):
+ if not hasattr(mapping, "__getitem__"):
m = dict((n, mapping(n)) for n in G)
else:
m = mapping
@@ -131,38 +132,39 @@ def _relabel_inplace(G, mapping):
try:
G.add_node(new, **G.node[old])
except KeyError:
- raise KeyError("Node %s is not in the graph"%old)
+ raise KeyError("Node %s is not in the graph" % old)
if multigraph:
new_edges = [(new, new if old == target else target, key, data)
- for (_,target,key,data)
+ for (_, target, key, data)
in G.edges(old, data=True, keys=True)]
if directed:
new_edges += [(new if old == source else source, new, key, data)
- for (source, _, key,data)
+ for (source, _, key, data)
in G.in_edges(old, data=True, keys=True)]
else:
new_edges = [(new, new if old == target else target, data)
- for (_,target,data) in G.edges(old, data=True)]
+ for (_, target, data) in G.edges(old, data=True)]
if directed:
- new_edges += [(new if old == source else source,new,data)
- for (source,_,data) in G.in_edges(old, data=True)]
+ new_edges += [(new if old == source else source, new, data)
+ for (source, _, data) in G.in_edges(old, data=True)]
G.remove_node(old)
G.add_edges_from(new_edges)
return G
+
def _relabel_copy(G, mapping):
H = G.__class__()
+ H.add_nodes_from(mapping.get(n, n) for n in G)
+ H._node.update(dict((mapping.get(n, n), d.copy()) for n, d in G.node.items()))
if G.name:
H.name = "(%s)" % G.name
if G.is_multigraph():
- H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),k,d.copy())
- for (n1,n2,k,d) in G.edges(keys=True, data=True))
+ H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
+ for (n1, n2, k, d) in G.edges(keys=True, data=True))
else:
- H.add_edges_from( (mapping.get(n1, n1),mapping.get(n2, n2),d.copy())
- for (n1, n2, d) in G.edges(data=True))
+ H.add_edges_from((mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
+ for (n1, n2, d) in G.edges(data=True))
- H.add_nodes_from(mapping.get(n, n) for n in G)
- H._node.update(dict((mapping.get(n, n), d.copy()) for n,d in G.node.items()))
H.graph.update(G.graph.copy())
return H
@@ -200,27 +202,27 @@ def convert_node_labels_to_integers(G, first_label=0, ordering="default",
--------
relabel_nodes
"""
- N = G.number_of_nodes()+first_label
+ N = G.number_of_nodes() + first_label
if ordering == "default":
mapping = dict(zip(G.nodes(), range(first_label, N)))
elif ordering == "sorted":
nlist = sorted(G.nodes())
mapping = dict(zip(nlist, range(first_label, N)))
elif ordering == "increasing degree":
- dv_pairs = [(d,n) for (n,d) in G.degree()]
- dv_pairs.sort() # in-place sort from lowest to highest degree
- mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
elif ordering == "decreasing degree":
- dv_pairs = [(d,n) for (n,d) in G.degree()]
- dv_pairs.sort() # in-place sort from lowest to highest degree
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
dv_pairs.reverse()
- mapping = dict(zip([n for d,n in dv_pairs], range(first_label, N)))
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
else:
- raise nx.NetworkXError('Unknown node ordering: %s'%ordering)
+ raise nx.NetworkXError('Unknown node ordering: %s' % ordering)
H = relabel_nodes(G, mapping)
- H.name = "("+G.name+")_with_int_labels"
+ H.name = "(" + G.name + ")_with_int_labels"
# create node attribute with the old label
if label_attribute is not None:
nx.set_node_attributes(H, label_attribute,
- dict((v,k) for k,v in mapping.items()))
+ dict((v, k) for k, v in mapping.items()))
return H
| _relabel_copy and OrderedGraph
The current behavior of `_relabel_copy()` is somewhat unintuitive for OrderedGraphs, where the nodes in the newly created graph are ordered according to the edge iterator of the original graph, not the node iterator. I think this would be fixed by placing `H.add_nodes_from(mapping.get(n, n) for n in G)` at the beginning of the function. | networkx/networkx | diff --git a/networkx/classes/tests/test_digraph.py b/networkx/classes/tests/test_digraph.py
index b46530e4d..c5b50c720 100644
--- a/networkx/classes/tests/test_digraph.py
+++ b/networkx/classes/tests/test_digraph.py
@@ -1,173 +1,192 @@
#!/usr/bin/env python
-from nose.tools import *
-import networkx
+
+from nose.tools import assert_equal
+from nose.tools import assert_false
+from nose.tools import assert_true
+from nose.tools import assert_raises
+
+
+import networkx as nx
+from networkx.testing import assert_nodes_equal
from test_graph import BaseGraphTester, BaseAttrGraphTester, TestGraph
from test_graph import TestEdgeSubgraph as TestGraphEdgeSubgraph
+
class BaseDiGraphTester(BaseGraphTester):
def test_has_successor(self):
- G=self.K3
- assert_equal(G.has_successor(0,1),True)
- assert_equal(G.has_successor(0,-1),False)
+ G = self.K3
+ assert_equal(G.has_successor(0, 1), True)
+ assert_equal(G.has_successor(0, -1), False)
def test_successors(self):
- G=self.K3
- assert_equal(sorted(G.successors(0)),[1,2])
- assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)
+ G = self.K3
+ assert_equal(sorted(G.successors(0)), [1, 2])
+ assert_raises((KeyError, nx.NetworkXError), G.successors, -1)
def test_has_predecessor(self):
- G=self.K3
- assert_equal(G.has_predecessor(0,1),True)
- assert_equal(G.has_predecessor(0,-1),False)
+ G = self.K3
+ assert_equal(G.has_predecessor(0, 1), True)
+ assert_equal(G.has_predecessor(0, -1), False)
def test_predecessors(self):
- G=self.K3
- assert_equal(sorted(G.predecessors(0)),[1,2])
- assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)
+ G = self.K3
+ assert_equal(sorted(G.predecessors(0)), [1, 2])
+ assert_raises((KeyError, nx.NetworkXError), G.predecessors, -1)
def test_edges(self):
- G=self.K3
- assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
- assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
+ G = self.K3
+ assert_equal(sorted(G.edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)])
+ assert_equal(sorted(G.edges(0)), [(0, 1), (0, 2)])
assert_equal(sorted(G.edges([0, 1])), [(0, 1), (0, 2), (1, 0), (1, 2)])
- assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)
+ assert_raises((KeyError, nx.NetworkXError), G.edges, -1)
def test_edges_data(self):
- G=self.K3
- all_edges = [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})]
+ G = self.K3
+ all_edges = [(0, 1, {}), (0, 2, {}), (1, 0, {}), (1, 2, {}), (2, 0, {}), (2, 1, {})]
assert_equal(sorted(G.edges(data=True)), all_edges)
assert_equal(sorted(G.edges(0, data=True)), all_edges[:2])
assert_equal(sorted(G.edges([0, 1], data=True)), all_edges[:4])
- assert_raises((KeyError,networkx.NetworkXError), G.edges, -1, True)
+ assert_raises((KeyError, nx.NetworkXError), G.edges, -1, True)
def test_out_edges(self):
- G=self.K3
- assert_equal(sorted(G.out_edges()),
- [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
- assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
- assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
+ G = self.K3
+ assert_equal(sorted(G.out_edges()), [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)])
+ assert_equal(sorted(G.out_edges(0)), [(0, 1), (0, 2)])
+ assert_raises((KeyError, nx.NetworkXError), G.out_edges, -1)
def test_out_edges_dir(self):
- G=self.P3
- assert_equal(sorted(G.out_edges()),[(0, 1), (1, 2)])
- assert_equal(sorted(G.out_edges(0)),[(0, 1)])
- assert_equal(sorted(G.out_edges(2)),[])
+ G = self.P3
+ assert_equal(sorted(G.out_edges()), [(0, 1), (1, 2)])
+ assert_equal(sorted(G.out_edges(0)), [(0, 1)])
+ assert_equal(sorted(G.out_edges(2)), [])
def test_out_edges_data(self):
- G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data' : 0})])
+ G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.out_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.out_edges(0, data=True)), [(0, 1, {'data': 0})])
assert_equal(sorted(G.out_edges(data='data')), [(0, 1, 0), (1, 0, None)])
assert_equal(sorted(G.out_edges(0, data='data')), [(0, 1, 0)])
def test_in_edges_dir(self):
- G=self.P3
+ G = self.P3
assert_equal(sorted(G.in_edges()), [(0, 1), (1, 2)])
assert_equal(sorted(G.in_edges(0)), [])
- assert_equal(sorted(G.in_edges(2)), [(1,2)])
+ assert_equal(sorted(G.in_edges(2)), [(1, 2)])
def test_in_edges_data(self):
- G=networkx.DiGraph([(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data' : 0}), (1, 0, {})])
- assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data' : 0})])
+ G = nx.DiGraph([(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.in_edges(data=True)), [(0, 1, {'data': 0}), (1, 0, {})])
+ assert_equal(sorted(G.in_edges(1, data=True)), [(0, 1, {'data': 0})])
assert_equal(sorted(G.in_edges(data='data')), [(0, 1, 0), (1, 0, None)])
assert_equal(sorted(G.in_edges(1, data='data')), [(0, 1, 0)])
def test_degree(self):
- G=self.K3
- assert_equal(sorted(G.degree()),[(0,4),(1,4),(2,4)])
- assert_equal(dict(G.degree()),{0:4,1:4,2:4})
+ G = self.K3
+ assert_equal(sorted(G.degree()), [(0, 4), (1, 4), (2, 4)])
+ assert_equal(dict(G.degree()), {0: 4, 1: 4, 2: 4})
assert_equal(G.degree(0), 4)
- assert_equal(list(G.degree(iter([0]))), [(0, 4)]) #run through iterator
+ assert_equal(list(G.degree(iter([0]))), [
+ (0, 4)]) # run through iterator
def test_in_degree(self):
- G=self.K3
- assert_equal(sorted(G.in_degree()),[(0,2),(1,2),(2,2)])
- assert_equal(dict(G.in_degree()),{0:2,1:2,2:2})
+ G = self.K3
+ assert_equal(sorted(G.in_degree()), [(0, 2), (1, 2), (2, 2)])
+ assert_equal(dict(G.in_degree()), {0: 2, 1: 2, 2: 2})
assert_equal(G.in_degree(0), 2)
- assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) #run through iterator
+ assert_equal(list(G.in_degree(iter([0]))), [(0, 2)]) # run through iterator
def test_in_degree_weighted(self):
- G=self.K3
- G.add_edge(0,1,weight=0.3,other=1.2)
- assert_equal(sorted(G.in_degree(weight='weight')),[(0,2),(1,1.3),(2,2)])
- assert_equal(dict(G.in_degree(weight='weight')),{0:2,1:1.3,2:2})
- assert_equal(G.in_degree(1,weight='weight'), 1.3)
- assert_equal(sorted(G.in_degree(weight='other')),[(0,2),(1,2.2),(2,2)])
- assert_equal(dict(G.in_degree(weight='other')),{0:2,1:2.2,2:2})
- assert_equal(G.in_degree(1,weight='other'), 2.2)
- assert_equal(list(G.in_degree(iter([1]),weight='other')), [(1, 2.2)])
+ G = self.K3
+ G.add_edge(0, 1, weight=0.3, other=1.2)
+ assert_equal(sorted(G.in_degree(weight='weight')), [(0, 2), (1, 1.3), (2, 2)])
+ assert_equal(dict(G.in_degree(weight='weight')), {0: 2, 1: 1.3, 2: 2})
+ assert_equal(G.in_degree(1, weight='weight'), 1.3)
+ assert_equal(sorted(G.in_degree(weight='other')), [(0, 2), (1, 2.2), (2, 2)])
+ assert_equal(dict(G.in_degree(weight='other')), {0: 2, 1: 2.2, 2: 2})
+ assert_equal(G.in_degree(1, weight='other'), 2.2)
+ assert_equal(list(G.in_degree(iter([1]), weight='other')), [(1, 2.2)])
def test_out_degree_weighted(self):
- G=self.K3
- G.add_edge(0,1,weight=0.3,other=1.2)
- assert_equal(sorted(G.out_degree(weight='weight')),[(0,1.3),(1,2),(2,2)])
- assert_equal(dict(G.out_degree(weight='weight')),{0:1.3,1:2,2:2})
- assert_equal(G.out_degree(0,weight='weight'), 1.3)
- assert_equal(sorted(G.out_degree(weight='other')),[(0,2.2),(1,2),(2,2)])
- assert_equal(dict(G.out_degree(weight='other')),{0:2.2,1:2,2:2})
- assert_equal(G.out_degree(0,weight='other'), 2.2)
+ G = self.K3
+ G.add_edge(0, 1, weight=0.3, other=1.2)
+ assert_equal(sorted(G.out_degree(weight='weight')), [(0, 1.3), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree(weight='weight')), {0: 1.3, 1: 2, 2: 2})
+ assert_equal(G.out_degree(0, weight='weight'), 1.3)
+ assert_equal(sorted(G.out_degree(weight='other')), [(0, 2.2), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree(weight='other')), {0: 2.2, 1: 2, 2: 2})
+ assert_equal(G.out_degree(0, weight='other'), 2.2)
assert_equal(list(G.out_degree(iter([0]), weight='other')), [(0, 2.2)])
def test_out_degree(self):
- G=self.K3
- assert_equal(sorted(G.out_degree()),[(0,2),(1,2),(2,2)])
- assert_equal(dict(G.out_degree()),{0:2,1:2,2:2})
+ G = self.K3
+ assert_equal(sorted(G.out_degree()), [(0, 2), (1, 2), (2, 2)])
+ assert_equal(dict(G.out_degree()), {0: 2, 1: 2, 2: 2})
assert_equal(G.out_degree(0), 2)
assert_equal(list(G.out_degree(iter([0]))), [(0, 2)])
def test_size(self):
- G=self.K3
- assert_equal(G.size(),6)
- assert_equal(G.number_of_edges(),6)
+ G = self.K3
+ assert_equal(G.size(), 6)
+ assert_equal(G.number_of_edges(), 6)
def test_to_undirected_reciprocal(self):
- G=self.Graph()
- G.add_edge(1,2)
- assert_true(G.to_undirected().has_edge(1,2))
- assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
- G.add_edge(2,1)
- assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))
+ G = self.Graph()
+ G.add_edge(1, 2)
+ assert_true(G.to_undirected().has_edge(1, 2))
+ assert_false(G.to_undirected(reciprocal=True).has_edge(1, 2))
+ G.add_edge(2, 1)
+ assert_true(G.to_undirected(reciprocal=True).has_edge(1, 2))
def test_reverse_copy(self):
- G=networkx.DiGraph([(0,1),(1,2)])
- R=G.reverse()
- assert_equal(sorted(R.edges()),[(1,0),(2,1)])
- R.remove_edge(1,0)
- assert_equal(sorted(R.edges()),[(2,1)])
- assert_equal(sorted(G.edges()),[(0,1),(1,2)])
+ G = nx.DiGraph([(0, 1), (1, 2)])
+ R = G.reverse()
+ assert_equal(sorted(R.edges()), [(1, 0), (2, 1)])
+ R.remove_edge(1, 0)
+ assert_equal(sorted(R.edges()), [(2, 1)])
+ assert_equal(sorted(G.edges()), [(0, 1), (1, 2)])
def test_reverse_nocopy(self):
- G=networkx.DiGraph([(0,1),(1,2)])
- R=G.reverse(copy=False)
- assert_equal(sorted(R.edges()),[(1,0),(2,1)])
- R.remove_edge(1,0)
- assert_equal(sorted(R.edges()),[(2,1)])
- assert_equal(sorted(G.edges()),[(2,1)])
-
-
-class BaseAttrDiGraphTester(BaseDiGraphTester,BaseAttrGraphTester):
+ G = nx.DiGraph([(0, 1), (1, 2)])
+ R = G.reverse(copy=False)
+ assert_equal(sorted(R.edges()), [(1, 0), (2, 1)])
+ R.remove_edge(1, 0)
+ assert_equal(sorted(R.edges()), [(2, 1)])
+ assert_equal(sorted(G.edges()), [(2, 1)])
+
+ def test_reverse_hashable(self):
+ class Foo(object):
+ pass
+ x = Foo()
+ y = Foo()
+ G = nx.DiGraph()
+ G.add_edge(x, y)
+ assert_nodes_equal(G.nodes(), G.reverse().nodes())
+ assert_equal([(y, x)], list(G.reverse().edges()))
+
+
+class BaseAttrDiGraphTester(BaseDiGraphTester, BaseAttrGraphTester):
pass
-class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
+class TestDiGraph(BaseAttrDiGraphTester, TestGraph):
"""Tests specific to dict-of-dict-of-dict digraph data structure"""
+
def setUp(self):
- self.Graph=networkx.DiGraph
+ self.Graph = nx.DiGraph
# build dict-of-dict-of-dict K3
- ed1,ed2,ed3,ed4,ed5,ed6 = ({},{},{},{},{},{})
- self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1:ed6}}
+ ed1, ed2, ed3, ed4, ed5, ed6 = ({}, {}, {}, {}, {}, {})
+ self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}}
self.k3edges = [(0, 1), (0, 2), (1, 2)]
self.k3nodes = [0, 1, 2]
self.K3 = self.Graph()
self.K3._adj = self.K3._succ = self.k3adj
- self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1:ed4}}
+ self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}}
self.K3._node = {}
self.K3._node[0] = {}
self.K3._node[1] = {}
self.K3._node[2] = {}
- ed1,ed2 = ({},{})
+ ed1, ed2 = ({}, {})
self.P3 = self.Graph()
self.P3._adj = {0: {1: ed1}, 1: {2: ed2}, 2: {}}
self.P3._succ = self.P3._adj
@@ -178,48 +197,48 @@ class TestDiGraph(BaseAttrDiGraphTester,TestGraph):
self.P3._node[2] = {}
def test_data_input(self):
- G=self.Graph(data={1:[2],2:[1]}, name="test")
- assert_equal(G.name,"test")
- assert_equal(sorted(G.adj.items()),[(1, {2: {}}), (2, {1: {}})])
- assert_equal(sorted(G.succ.items()),[(1, {2: {}}), (2, {1: {}})])
- assert_equal(sorted(G.pred.items()),[(1, {2: {}}), (2, {1: {}})])
+ G = self.Graph(data={1: [2], 2: [1]}, name="test")
+ assert_equal(G.name, "test")
+ assert_equal(sorted(G.adj.items()), [(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.succ.items()), [(1, {2: {}}), (2, {1: {}})])
+ assert_equal(sorted(G.pred.items()), [(1, {2: {}}), (2, {1: {}})])
def test_add_edge(self):
- G=self.Graph()
- G.add_edge(0,1)
- assert_equal(G.adj,{0: {1: {}}, 1: {}})
- assert_equal(G.succ,{0: {1: {}}, 1: {}})
- assert_equal(G.pred,{0: {}, 1: {0:{}}})
- G=self.Graph()
- G.add_edge(*(0,1))
- assert_equal(G.adj,{0: {1: {}}, 1: {}})
- assert_equal(G.succ,{0: {1: {}}, 1: {}})
- assert_equal(G.pred,{0: {}, 1: {0:{}}})
+ G = self.Graph()
+ G.add_edge(0, 1)
+ assert_equal(G.adj, {0: {1: {}}, 1: {}})
+ assert_equal(G.succ, {0: {1: {}}, 1: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {}}})
+ G = self.Graph()
+ G.add_edge(*(0, 1))
+ assert_equal(G.adj, {0: {1: {}}, 1: {}})
+ assert_equal(G.succ, {0: {1: {}}, 1: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {}}})
def test_add_edges_from(self):
- G=self.Graph()
- G.add_edges_from([(0,1),(0,2,{'data':3})],data=2)
- assert_equal(G.adj,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
- assert_equal(G.succ,{0: {1: {'data':2}, 2: {'data':3}}, 1: {}, 2: {}})
- assert_equal(G.pred,{0: {}, 1: {0: {'data':2}}, 2: {0: {'data':3}}})
+ G = self.Graph()
+ G.add_edges_from([(0, 1), (0, 2, {'data': 3})], data=2)
+ assert_equal(G.adj, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}})
+ assert_equal(G.succ, {0: {1: {'data': 2}, 2: {'data': 3}}, 1: {}, 2: {}})
+ assert_equal(G.pred, {0: {}, 1: {0: {'data': 2}}, 2: {0: {'data': 3}}})
- assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)]) # too few in tuple
- assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3)]) # too many in tuple
- assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
+ assert_raises(nx.NetworkXError, G.add_edges_from, [(0,)]) # too few in tuple
+ assert_raises(nx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3)]) # too many in tuple
+ assert_raises(TypeError, G.add_edges_from, [0]) # not a tuple
def test_remove_edge(self):
- G=self.K3
- G.remove_edge(0,1)
- assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
- assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1:{}}})
- assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
+ G = self.K3
+ G.remove_edge(0, 1)
+ assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
+ assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}})
+ assert_raises((KeyError, nx.NetworkXError), G.remove_edge, -1, 0)
def test_remove_edges_from(self):
- G=self.K3
- G.remove_edges_from([(0,1)])
- assert_equal(G.succ,{0:{2:{}},1:{0:{},2:{}},2:{0:{},1:{}}})
- assert_equal(G.pred,{0:{1:{}, 2:{}}, 1:{2:{}}, 2:{0:{},1: {}}})
- G.remove_edges_from([(0,0)]) # silent fail
+ G = self.K3
+ G.remove_edges_from([(0, 1)])
+ assert_equal(G.succ, {0: {2: {}}, 1: {0: {}, 2: {}}, 2: {0: {}, 1: {}}})
+ assert_equal(G.pred, {0: {1: {}, 2: {}}, 1: {2: {}}, 2: {0: {}, 1: {}}})
+ G.remove_edges_from([(0, 0)]) # silent fail
class TestEdgeSubgraph(TestGraphEdgeSubgraph):
@@ -227,7 +246,7 @@ class TestEdgeSubgraph(TestGraphEdgeSubgraph):
def setup(self):
# Create a doubly-linked path graph on five nodes.
- G = networkx.DiGraph(networkx.path_graph(5))
+ G = nx.DiGraph(nx.path_graph(5))
# Add some node, edge, and graph attributes.
for i in range(5):
G.node[i]['name'] = 'node{}'.format(i)
@@ -244,7 +263,7 @@ class TestEdgeSubgraph(TestGraphEdgeSubgraph):
For more information, see GitHub issue #2370.
"""
- G = networkx.DiGraph()
+ G = nx.DiGraph()
G.add_edge(0, 1)
H = G.edge_subgraph([(0, 1)])
assert_equal(list(H.predecessors(0)), [])
diff --git a/networkx/tests/test_relabel.py b/networkx/tests/test_relabel.py
index 31d25fe90..f99ec7354 100644
--- a/networkx/tests/test_relabel.py
+++ b/networkx/tests/test_relabel.py
@@ -3,153 +3,161 @@ from nose.tools import *
from networkx import *
from networkx.convert import *
from networkx.algorithms.operators import *
-from networkx.generators.classic import barbell_graph,cycle_graph
+from networkx.generators.classic import barbell_graph, cycle_graph
from networkx.testing import *
+
class TestRelabel():
def test_convert_node_labels_to_integers(self):
# test that empty graph converts fine for all options
- G=empty_graph()
- H=convert_node_labels_to_integers(G,100)
+ G = empty_graph()
+ H = convert_node_labels_to_integers(G, 100)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(list(H.nodes()), [])
assert_equal(list(H.edges()), [])
- for opt in ["default", "sorted", "increasing degree",
- "decreasing degree"]:
- G=empty_graph()
- H=convert_node_labels_to_integers(G,100, ordering=opt)
+ for opt in ["default", "sorted", "increasing degree", "decreasing degree"]:
+ G = empty_graph()
+ H = convert_node_labels_to_integers(G, 100, ordering=opt)
assert_equal(H.name, '(empty_graph(0))_with_int_labels')
assert_equal(list(H.nodes()), [])
assert_equal(list(H.edges()), [])
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
- G.name="paw"
- H=convert_node_labels_to_integers(G)
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ G.name = "paw"
+ H = convert_node_labels_to_integers(G)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- H=convert_node_labels_to_integers(G,1000)
+ H = convert_node_labels_to_integers(G, 1000)
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
assert_nodes_equal(H.nodes(), [1000, 1001, 1002, 1003])
- H=convert_node_labels_to_integers(G,ordering="increasing degree")
+ H = convert_node_labels_to_integers(G, ordering="increasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 1)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 3)
+ assert_equal(degree(H, 0), 1)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 3)
- H=convert_node_labels_to_integers(G,ordering="decreasing degree")
+ H = convert_node_labels_to_integers(G, ordering="decreasing degree")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 3)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 1)
+ assert_equal(degree(H, 0), 3)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 1)
- H=convert_node_labels_to_integers(G,ordering="increasing degree",
- label_attribute='label')
+ H = convert_node_labels_to_integers(G, ordering="increasing degree",
+ label_attribute='label')
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- assert_equal(degree(H,0), 1)
- assert_equal(degree(H,1), 2)
- assert_equal(degree(H,2), 2)
- assert_equal(degree(H,3), 3)
+ assert_equal(degree(H, 0), 1)
+ assert_equal(degree(H, 1), 2)
+ assert_equal(degree(H, 2), 2)
+ assert_equal(degree(H, 3), 3)
# check mapping
- assert_equal(H.node[3]['label'],'C')
- assert_equal(H.node[0]['label'],'D')
- assert_true(H.node[1]['label']=='A' or H.node[2]['label']=='A')
- assert_true(H.node[1]['label']=='B' or H.node[2]['label']=='B')
+ assert_equal(H.node[3]['label'], 'C')
+ assert_equal(H.node[0]['label'], 'D')
+ assert_true(H.node[1]['label'] == 'A' or H.node[2]['label'] == 'A')
+ assert_true(H.node[1]['label'] == 'B' or H.node[2]['label'] == 'B')
def test_convert_to_integers2(self):
- G=empty_graph()
- G.add_edges_from([('C','D'),('A','B'),('A','C'),('B','C')])
- G.name="paw"
- H=convert_node_labels_to_integers(G,ordering="sorted")
+ G = empty_graph()
+ G.add_edges_from([('C', 'D'), ('A', 'B'), ('A', 'C'), ('B', 'C')])
+ G.name = "paw"
+ H = convert_node_labels_to_integers(G, ordering="sorted")
degH = (d for n, d in H.degree())
degG = (d for n, d in G.degree())
assert_equal(sorted(degH), sorted(degG))
- H=convert_node_labels_to_integers(G,ordering="sorted",
- label_attribute='label')
- assert_equal(H.node[0]['label'],'A')
- assert_equal(H.node[1]['label'],'B')
- assert_equal(H.node[2]['label'],'C')
- assert_equal(H.node[3]['label'],'D')
+ H = convert_node_labels_to_integers(G, ordering="sorted",
+ label_attribute='label')
+ assert_equal(H.node[0]['label'], 'A')
+ assert_equal(H.node[1]['label'], 'B')
+ assert_equal(H.node[2]['label'], 'C')
+ assert_equal(H.node[3]['label'], 'D')
@raises(nx.NetworkXError)
def test_convert_to_integers_raise(self):
G = nx.Graph()
- H=convert_node_labels_to_integers(G,ordering="increasing age")
-
+ H = convert_node_labels_to_integers(G, ordering="increasing age")
def test_relabel_nodes_copy(self):
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping)
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_function(self):
- G=empty_graph()
- G.add_edges_from([('A','B'),('A','C'),('B','C'),('C','D')])
+ G = empty_graph()
+ G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
# function mapping no longer encouraged but works
+
def mapping(n):
return ord(n)
- H=relabel_nodes(G,mapping)
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), [65, 66, 67, 68])
def test_relabel_nodes_graph(self):
- G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping)
+ G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
+ def test_relabel_nodes_orderedgraph(self):
+ G = OrderedGraph()
+ G.add_nodes_from([1, 2, 3])
+ G.add_edges_from([(1, 3), (2, 3)])
+ mapping = {1: 'a', 2: 'b', 3: 'c'}
+ H = relabel_nodes(G, mapping)
+ assert list(H.nodes) == ['a', 'b', 'c']
+
def test_relabel_nodes_digraph(self):
- G=DiGraph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={'A':'aardvark','B':'bear','C':'cat','D':'dog'}
- H=relabel_nodes(G,mapping,copy=False)
+ G = DiGraph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {'A': 'aardvark', 'B': 'bear', 'C': 'cat', 'D': 'dog'}
+ H = relabel_nodes(G, mapping, copy=False)
assert_nodes_equal(H.nodes(), ['aardvark', 'bear', 'cat', 'dog'])
def test_relabel_nodes_multigraph(self):
- G=MultiGraph([('a','b'),('a','b')])
- mapping={'a':'aardvark','b':'bear'}
- G=relabel_nodes(G,mapping,copy=False)
- assert_nodes_equal(G.nodes(),['aardvark', 'bear'])
- assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')])
+ G = MultiGraph([('a', 'b'), ('a', 'b')])
+ mapping = {'a': 'aardvark', 'b': 'bear'}
+ G = relabel_nodes(G, mapping, copy=False)
+ assert_nodes_equal(G.nodes(), ['aardvark', 'bear'])
+ assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_nodes_multidigraph(self):
- G=MultiDiGraph([('a','b'),('a','b')])
- mapping={'a':'aardvark','b':'bear'}
- G=relabel_nodes(G,mapping,copy=False)
- assert_nodes_equal(G.nodes(),['aardvark', 'bear'])
- assert_edges_equal(G.edges(),[('aardvark', 'bear'), ('aardvark', 'bear')])
+ G = MultiDiGraph([('a', 'b'), ('a', 'b')])
+ mapping = {'a': 'aardvark', 'b': 'bear'}
+ G = relabel_nodes(G, mapping, copy=False)
+ assert_nodes_equal(G.nodes(), ['aardvark', 'bear'])
+ assert_edges_equal(G.edges(), [('aardvark', 'bear'), ('aardvark', 'bear')])
def test_relabel_isolated_nodes_to_same(self):
- G=Graph()
+ G = Graph()
G.add_nodes_from(range(4))
- mapping={1:1}
- H=relabel_nodes(G, mapping, copy=False)
+ mapping = {1: 1}
+ H = relabel_nodes(G, mapping, copy=False)
assert_nodes_equal(H.nodes(), list(range(4)))
@raises(KeyError)
def test_relabel_nodes_missing(self):
- G=Graph([('A','B'),('A','C'),('B','C'),('C','D')])
- mapping={0:'aardvark'}
- G=relabel_nodes(G,mapping,copy=False)
+ G = Graph([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'D')])
+ mapping = {0: 'aardvark'}
+ G = relabel_nodes(G, mapping, copy=False)
def test_relabel_copy_name(self):
- G=Graph()
+ G = Graph()
H = relabel_nodes(G, {}, copy=True)
assert_equal(H.graph, G.graph)
H = relabel_nodes(G, {}, copy=False)
@@ -161,22 +169,21 @@ class TestRelabel():
assert_equal(H.graph, G.graph)
def test_relabel_toposort(self):
- K5=nx.complete_graph(4)
- G=nx.complete_graph(4)
- G=nx.relabel_nodes(G,dict( [(i,i+1) for i in range(4)]),copy=False)
- nx.is_isomorphic(K5,G)
- G=nx.complete_graph(4)
- G=nx.relabel_nodes(G,dict( [(i,i-1) for i in range(4)]),copy=False)
- nx.is_isomorphic(K5,G)
-
+ K5 = nx.complete_graph(4)
+ G = nx.complete_graph(4)
+ G = nx.relabel_nodes(G, dict([(i, i + 1) for i in range(4)]), copy=False)
+ nx.is_isomorphic(K5, G)
+ G = nx.complete_graph(4)
+ G = nx.relabel_nodes(G, dict([(i, i - 1) for i in range(4)]), copy=False)
+ nx.is_isomorphic(K5, G)
def test_relabel_selfloop(self):
G = nx.DiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
- assert_nodes_equal(G.nodes(),['One','Three','Two'])
+ assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two'])
G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 3)])
G = nx.relabel_nodes(G, {1: 'One', 2: 'Two', 3: 'Three'}, copy=False)
- assert_nodes_equal(G.nodes(),['One','Three','Two'])
+ assert_nodes_equal(G.nodes(), ['One', 'Three', 'Two'])
G = nx.MultiDiGraph([(1, 1)])
G = nx.relabel_nodes(G, {1: 0}, copy=False)
assert_nodes_equal(G.nodes(), [0])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@22a6ebaf0c235a825195e48558f39b65c26d5a1c#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_hashable",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_orderedgraph"
]
| [
"networkx/classes/tests/test_digraph.py::TestGraph::test_contains",
"networkx/classes/tests/test_digraph.py::TestGraph::test_order",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nodes",
"networkx/classes/tests/test_digraph.py::TestGraph::test_has_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_has_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_neighbors",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edges",
"networkx/classes/tests/test_digraph.py::TestGraph::test_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_size",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nbunch_iter",
"networkx/classes/tests/test_digraph.py::TestGraph::test_nbunch_iter_node_format_raise",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloop_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloops",
"networkx/classes/tests/test_digraph.py::TestGraph::test_weighted_degree",
"networkx/classes/tests/test_digraph.py::TestGraph::test_name",
"networkx/classes/tests/test_digraph.py::TestGraph::test_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_class_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_attr_reference",
"networkx/classes/tests/test_digraph.py::TestGraph::test_fresh_copy",
"networkx/classes/tests/test_digraph.py::TestGraph::test_graph_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_node_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_node_attr2",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_lookup",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr2",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr3",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edge_attr4",
"networkx/classes/tests/test_digraph.py::TestGraph::test_to_undirected",
"networkx/classes/tests/test_digraph.py::TestGraph::test_to_directed",
"networkx/classes/tests/test_digraph.py::TestGraph::test_subgraph",
"networkx/classes/tests/test_digraph.py::TestGraph::test_selfloops_attr",
"networkx/classes/tests/test_digraph.py::TestGraph::test_data_input",
"networkx/classes/tests/test_digraph.py::TestGraph::test_adjacency",
"networkx/classes/tests/test_digraph.py::TestGraph::test_getitem",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_nodes_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_nodes_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_add_edges_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_edge",
"networkx/classes/tests/test_digraph.py::TestGraph::test_remove_edges_from",
"networkx/classes/tests/test_digraph.py::TestGraph::test_clear",
"networkx/classes/tests/test_digraph.py::TestGraph::test_edges_data",
"networkx/classes/tests/test_digraph.py::TestGraph::test_get_edge_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_contains",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_order",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nodes",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_neighbors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nbunch_iter",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_nbunch_iter_node_format_raise",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloop_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloops",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_weighted_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_name",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_class_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_attr_reference",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_fresh_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_graph_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_node_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_node_attr2",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_lookup",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr2",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr3",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edge_attr4",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_undirected",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_directed",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_subgraph",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_selfloops_attr",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_adjacency",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_getitem",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_nodes_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_nodes_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_clear",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_get_edge_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_successor",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_successors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_has_predecessor",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_predecessors",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edges",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges_dir",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_edges_dir",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_degree_weighted",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_degree_weighted",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_degree",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_size",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_to_undirected_reciprocal",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_data_input",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_add_edges_from",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_edge",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_remove_edges_from",
"networkx/tests/test_relabel.py::test"
]
| [
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_correct_nodes",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_correct_edges",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_node_attr_dict",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_edge_attr_dict",
"networkx/classes/tests/test_digraph.py::TestGraphEdgeSubgraph::test_graph_attr_dict",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_out_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_in_edges_data",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_copy",
"networkx/classes/tests/test_digraph.py::TestDiGraph::test_reverse_nocopy",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_correct_nodes",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_correct_edges",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_add_node",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_remove_node",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_node_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_edge_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_graph_attr_dict",
"networkx/classes/tests/test_digraph.py::TestEdgeSubgraph::test_pred_succ",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_node_labels_to_integers",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers2",
"networkx/tests/test_relabel.py::TestRelabel::test_convert_to_integers_raise",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_copy",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_function",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_graph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_digraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_multidigraph",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_isolated_nodes_to_same",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_nodes_missing",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_copy_name",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_toposort",
"networkx/tests/test_relabel.py::TestRelabel::test_relabel_selfloop"
]
| []
| BSD 3-Clause | 1,404 | [
"networkx/relabel.py",
"networkx/classes/digraph.py"
]
| [
"networkx/relabel.py",
"networkx/classes/digraph.py"
]
|
mapbox__mapbox-sdk-py-191 | ac54c0dd453c61020f1f42eafdc3af34a7664718 | 2017-06-27 14:58:23 | d503098e549834471e0857adf5163085af6b4355 | diff --git a/mapbox/errors.py b/mapbox/errors.py
index 005796a..95d982f 100644
--- a/mapbox/errors.py
+++ b/mapbox/errors.py
@@ -46,14 +46,22 @@ class InvalidParameterError(ValidationError):
class InvalidFileError(ValidationError):
pass
+
class InvalidResourceTypeError(ValidationError):
pass
-
+
+
class InvalidPeriodError(ValidationError):
pass
+
class InvalidUsernameError(ValidationError):
pass
+
class InvalidId(ValidationError):
pass
+
+
+class MapboxDeprecationWarning(UserWarning):
+ pass
diff --git a/mapbox/services/static.py b/mapbox/services/static.py
index fe5a9fb..235a238 100644
--- a/mapbox/services/static.py
+++ b/mapbox/services/static.py
@@ -40,7 +40,8 @@ class Static(Service):
return val
def image(self, mapid, lon=None, lat=None, z=None, features=None,
- width=600, height=600, image_format='png256', sort_keys=False):
+ width=600, height=600, image_format='png256', sort_keys=False,
+ retina=False):
if lon is not None and lat is not None and z is not None:
auto = False
@@ -58,8 +59,7 @@ class Static(Service):
lat=str(lat),
z=str(z),
width=str(width),
- height=str(height),
- fmt=image_format)
+ height=str(height))
if features:
collection = normalize_geojson_featurecollection(features)
@@ -69,19 +69,25 @@ class Static(Service):
self._validate_overlay(values['overlay'])
if auto:
- pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}.{fmt}'
+ pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}'
else:
pth = ('/{mapid}/geojson({overlay})/{lon},{lat},{z}'
- '/{width}x{height}.{fmt}')
+ '/{width}x{height}')
else:
if auto:
raise errors.InvalidCoordError(
"Must provide features if lat, lon, z are None")
# No overlay
- pth = '/{mapid}/{lon},{lat},{z}/{width}x{height}.{fmt}'
+ pth = '/{mapid}/{lon},{lat},{z}/{width}x{height}'
uri = URITemplate(self.baseuri + pth).expand(**values)
+
+ # @2x.format handled separately to avoid HTML escaping the ampersand
+ twox = '@2x' if retina else ''
+ full_fmt = '{0}.{1}'.format(twox, image_format)
+ uri += full_fmt
+
res = self.session.get(uri)
self.handle_http_error(res)
return res
diff --git a/mapbox/services/static_style.py b/mapbox/services/static_style.py
index 50eb0b5..b0cef3a 100644
--- a/mapbox/services/static_style.py
+++ b/mapbox/services/static_style.py
@@ -1,4 +1,5 @@
import json
+import warnings
from uritemplate import URITemplate
@@ -61,13 +62,13 @@ class StaticStyle(Service):
raise errors.ImageSizeError('tile_size must be 256 or 512 pixels')
pth = '/{username}/{style_id}/tiles/{tile_size}/{z}/{x}/{y}'
- if retina:
- pth += '@2x'
values = dict(username=username, style_id=style_id,
tile_size=tile_size, z=z, x=x, y=y)
uri = URITemplate(self.baseuri + pth).expand(**values)
+ if retina:
+ uri += '@2x'
res = self.session.get(uri)
self.handle_http_error(res)
return res
@@ -80,8 +81,8 @@ class StaticStyle(Service):
return res
def image(self, username, style_id, lon=None, lat=None, zoom=None, features=None,
- pitch=0, bearing=0, width=600, height=600, twox=False, sort_keys=False,
- attribution=None, logo=None, before_layer=None):
+ pitch=0, bearing=0, width=600, height=600, retina=None, sort_keys=False,
+ attribution=None, logo=None, before_layer=None, twox=None):
params = {}
if attribution is not None:
@@ -91,6 +92,16 @@ class StaticStyle(Service):
if before_layer is not None:
params['before_layer'] = before_layer
+ # twox as a deprecated alias for retina
+ if retina is None:
+ if twox is not None:
+ warnings.warn('twox is a deprecated alias for retina',
+ errors.MapboxDeprecationWarning)
+ retina = twox
+ else:
+ if twox is not None:
+ raise errors.ValidationError('Conflicting args; Remove twox and use retina')
+
if lon is not None and lat is not None and zoom is not None:
auto = False
lat = validate_lat(lat)
@@ -112,7 +123,6 @@ class StaticStyle(Service):
lat=str(lat),
zoom=str(zoom),
auto=auto,
- twox='@2x' if twox else '',
width=str(width),
height=str(height))
@@ -126,9 +136,9 @@ class StaticStyle(Service):
pth = '/{username}/{style_id}/static/geojson({overlay})/'
if auto:
# TODO what about {bearing} and {pitch}
- pth += 'auto/{width}x{height}{twox}'
+ pth += 'auto/{width}x{height}'
else:
- pth += '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}{twox}'
+ pth += '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}'
else:
if auto:
raise errors.InvalidCoordError(
@@ -136,9 +146,14 @@ class StaticStyle(Service):
# No overlay
pth = ('/{username}/{style_id}/static/'
- '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}{twox}')
+ '{lon},{lat},{zoom},{bearing},{pitch}/{width}x{height}')
uri = URITemplate(self.baseuri + pth).expand(**values)
+
+ # @2x handled separately to avoid HTML escaping the ampersand
+ if retina:
+ uri += '@2x'
+
res = self.session.get(uri, params=params)
self.handle_http_error(res)
return res
 | Retina with Static API?
Hello, it seems I can't choose the retina format for my generated static maps.
If I look in the code:
```python
if auto:
    pth = '/{mapid}/geojson({overlay})/auto/{width}x{height}.{fmt}'
```
But if I look at the global documentation, the url scheme is `{width}x{height}@2x.{fmt}`
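For concreteness, this is roughly the call I would like to be able to make (the `retina` keyword below is hypothetical, just to illustrate the request; it should translate into the `@2x` form of the URL above):

```python
import mapbox

service = mapbox.Static(access_token='pk.your_token')
# hypothetical retina flag -> requests .../-61.7,12.1,12/600x600@2x.png256
response = service.image('mapbox.satellite', lon=-61.7, lat=12.1, z=12, retina=True)
with open('satellite@2x.png', 'wb') as dst:
    dst.write(response.content)
```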
So there is no way to add retina images here? | mapbox/mapbox-sdk-py | diff --git a/tests/test_staticmaps.py b/tests/test_staticmaps.py
index 4e582d4..cb325bf 100644
--- a/tests/test_staticmaps.py
+++ b/tests/test_staticmaps.py
@@ -104,6 +104,7 @@ def test_staticmap_featurestoolarge(points):
with pytest.raises(mapbox.errors.ValidationError):
service._validate_overlay(json.dumps(points * 100))
+
def test_staticmap_imagesize():
service = mapbox.Static(access_token='pk.test')
with pytest.raises(mapbox.errors.ValidationError):
@@ -111,14 +112,32 @@ def test_staticmap_imagesize():
with pytest.raises(mapbox.errors.ValidationError):
service._validate_image_size(2000)
+
def test_latlon():
service = mapbox.Static(access_token='pk.test')
assert -179.0 == service._validate_lon(-179.0)
assert -85.0 == service._validate_lat(-85.0)
+
def test_lon_invalid():
service = mapbox.Static(access_token='pk.test')
with pytest.raises(mapbox.errors.ValidationError):
service._validate_lat(-86.0)
with pytest.raises(mapbox.errors.ValidationError):
service._validate_lon(-181.0)
+
+
[email protected]
+def test_staticmap_retina():
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/v4/mapbox.satellite/-61.7,12.1,12/[email protected]?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.Static(access_token='pk.test').image(
+ 'mapbox.satellite', -61.7, 12.1, 12, retina=True)
+ assert res.status_code == 200
diff --git a/tests/test_staticstyle.py b/tests/test_staticstyle.py
index 1d89344..6cf9941 100644
--- a/tests/test_staticstyle.py
+++ b/tests/test_staticstyle.py
@@ -184,7 +184,7 @@ def test_bad_tilesize():
@responses.activate
-def test_staticmap_tile():
+def test_staticmap_tile_retina():
responses.add(
responses.GET,
@@ -212,3 +212,44 @@ def test_staticmap_wmts():
res = mapbox.StaticStyle(access_token='pk.test').wmts('mapbox', 'streets-v9')
assert res.status_code == 200
+
+
[email protected]
+def test_staticmap_retina():
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/styles/v1/mapbox/streets-v9/static/-61.7,12.1,12.5,75,25/600x600@2x?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ res = mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, retina=True)
+ assert res.status_code == 200
+
+
[email protected]
+def test_staticmap_twox_deprecated():
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/styles/v1/mapbox/streets-v9/static/-61.7,12.1,12.5,75,25/600x600@2x?access_token=pk.test',
+ match_querystring=True,
+ body='png123',
+ status=200,
+ content_type='image/png')
+
+ with pytest.warns(mapbox.errors.MapboxDeprecationWarning):
+ res = mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, twox=True)
+ assert res.status_code == 200
+
+
+def test_staticmap_twox_deprecated_error():
+ with pytest.raises(mapbox.errors.ValidationError):
+ mapbox.StaticStyle(access_token='pk.test').image(
+ 'mapbox', 'streets-v9', -61.7, 12.1, 12.5, pitch=25,
+ bearing=75, retina=True, twox=True)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"responses",
"tox"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@ac54c0dd453c61020f1f42eafdc3af34a7664718#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
polyline==1.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- platformdirs==2.4.0
- polyline==1.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_staticmaps.py::test_staticmap_retina",
"tests/test_staticstyle.py::test_staticmap_retina",
"tests/test_staticstyle.py::test_staticmap_twox_deprecated",
"tests/test_staticstyle.py::test_staticmap_twox_deprecated_error"
]
| []
| [
"tests/test_staticmaps.py::test_staticmap_lonlatz_only",
"tests/test_staticmaps.py::test_staticmap_lonlatz_features",
"tests/test_staticmaps.py::test_staticmap_auto_features",
"tests/test_staticmaps.py::test_staticmap_auto_nofeatures",
"tests/test_staticmaps.py::test_staticmap_featurestoolarge",
"tests/test_staticmaps.py::test_staticmap_imagesize",
"tests/test_staticmaps.py::test_latlon",
"tests/test_staticmaps.py::test_lon_invalid",
"tests/test_staticstyle.py::test_staticmap_lonlatzpitchbearing",
"tests/test_staticstyle.py::test_staticmap_lonlatz_features",
"tests/test_staticstyle.py::test_staticmap_auto_features",
"tests/test_staticstyle.py::test_staticmap_auto_nofeatures",
"tests/test_staticstyle.py::test_staticmap_featurestoolarge",
"tests/test_staticstyle.py::test_staticmap_validate_bearing",
"tests/test_staticstyle.py::test_staticmap_validate_pitch",
"tests/test_staticstyle.py::test_staticmap_imagesize",
"tests/test_staticstyle.py::test_latlon",
"tests/test_staticstyle.py::test_lon_invalid",
"tests/test_staticstyle.py::test_staticmap_options",
"tests/test_staticstyle.py::test_staticmap_tile",
"tests/test_staticstyle.py::test_bad_tilesize",
"tests/test_staticstyle.py::test_staticmap_tile_retina",
"tests/test_staticstyle.py::test_staticmap_wmts"
]
| []
| MIT License | 1,405 | [
"mapbox/errors.py",
"mapbox/services/static_style.py",
"mapbox/services/static.py"
]
| [
"mapbox/errors.py",
"mapbox/services/static_style.py",
"mapbox/services/static.py"
]
|
|
mapbox__mapbox-sdk-py-192 | ac54c0dd453c61020f1f42eafdc3af34a7664718 | 2017-06-27 16:03:43 | d503098e549834471e0857adf5163085af6b4355 | diff --git a/README.rst b/README.rst
index fd8f00c..d5e4771 100644
--- a/README.rst
+++ b/README.rst
@@ -15,6 +15,11 @@ The Mapbox Python SDK is a low-level client API, not a Resource API such as the
Services
========
+- **Analytics** `examples <./docs/analytics.md>`__, `website <https://www.mapbox.com/api-documentation/#analytics>`__
+
+ - API usage for services by resource.
+ - available for premium and enterprise plans.
+
- **Directions** `examples <./docs/directions.md#directions>`__, `website <https://www.mapbox.com/api-documentation/?language=Python#directions>`__
- Profiles for driving, walking, and cycling
@@ -37,9 +42,15 @@ Services
- **Static Maps** `examples <./docs/static.md#static-maps>`__, `website <https://www.mapbox.com/api-documentation/pages/static_classic.html>`__
- - Generate standalone images from existing Mapbox mapids
+ - Generate standalone images from existing Mapbox *mapids* (tilesets)
- Render with GeoJSON overlays
+
+- **Static Styles** `examples <./docs/static.md#static-maps>`__, `website <https://www.mapbox.com/api-documentation/#static>`__
+ - Generate standalone images from existing Mapbox *styles*
+ - Render with GeoJSON overlays
+ - Adjust pitch and bearing, decimal zoom levels
+
- **Surface** `examples <./docs/surface.md#surface>`__, `website <https://www.mapbox.com/developers/api/surface/>`__
- Interpolates values along lines. Useful for elevation traces.
diff --git a/mapbox/services/geocoding.py b/mapbox/services/geocoding.py
index 9bccfd2..e22b91e 100644
--- a/mapbox/services/geocoding.py
+++ b/mapbox/services/geocoding.py
@@ -42,7 +42,7 @@ class Geocoder(Service):
return {'types': ",".join(types)}
def forward(self, address, types=None, lon=None, lat=None,
- country=None, bbox=None, limit=None):
+ country=None, bbox=None, limit=None, languages=None):
"""Returns a Requests response object that contains a GeoJSON
collection of places matching the given address.
@@ -64,6 +64,8 @@ class Geocoder(Service):
params.update(proximity='{0},{1}'.format(
round(float(lon), self.precision.get('proximity', 3)),
round(float(lat), self.precision.get('proximity', 3))))
+ if languages:
+ params.update(language=','.join(languages))
if bbox is not None:
params.update(bbox='{0},{1},{2},{3}'.format(*bbox))
if limit is not None:
@@ -124,7 +126,7 @@ class Geocoder(Service):
'address': "A street address with house number. Examples: 1600 Pennsylvania Ave NW, 1051 Market St, Oberbaumstrasse 7.",
'country': "Sovereign states and other political entities. Examples: United States, France, China, Russia.",
'place': "City, town, village or other municipality relevant to a country's address or postal system. Examples: Cleveland, Saratoga Springs, Berlin, Paris.",
- 'locality': "A smaller area within a place that possesses official status and boundaries. Examples: Oakleigh (Melbourne)",
+ 'locality': "A smaller area within a place that possesses official status and boundaries. Examples: Oakleigh (Melbourne)",
'neighborhood': "A smaller area within a place, often without formal boundaries. Examples: Montparnasse, Downtown, Haight-Ashbury.",
'poi': "Places of interest including commercial venues, major landmarks, parks, and other features. Examples: Subway Restaurant, Yosemite National Park, Statue of Liberty.",
'poi.landmark': "Places of interest that are particularly notable or long-lived like parks, places of worship and museums. A strict subset of the poi place type. Examples: Yosemite National Park, Statue of Liberty.",
 | Doesn't `forward()` support the language query?
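For concreteness, this is the kind of call I would expect to work (the `languages` argument name is my guess; the HTTP API itself takes a `language` query parameter):

```python
import mapbox

geocoder = mapbox.Geocoder(access_token='pk.your_token')
# hypothetical languages argument -> would append language=en,de to the request
response = geocoder.forward('1600 pennsylvania ave nw', languages=['en', 'de'])
print(response.json())
```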
In the current definition of `forward()` in `https://github.com/mapbox/mapbox-sdk-py/blob/master/mapbox/services/geocoding.py`, language queries (documented at `https://www.mapbox.com/api-documentation/?language=Python#geocoding`) are not supported. Am I right, or did I do something wrong? | mapbox/mapbox-sdk-py | diff --git a/tests/test_geocoder.py b/tests/test_geocoder.py
index 0e82af0..46efa78 100644
--- a/tests/test_geocoder.py
+++ b/tests/test_geocoder.py
@@ -350,3 +350,17 @@ def test_geocoder_forward_country():
response = mapbox.Geocoder(
access_token='pk.test').forward('1600 pennsylvania ave nw', country=['us'])
assert response.status_code == 200
+
+
[email protected]
+def test_geocoder_language():
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/geocoding/v5/mapbox.places/1600%20pennsylvania%20ave%20nw.json?language=en,de&access_token=pk.test',
+ match_querystring=True,
+ body='{"query": ["1600", "pennsylvania", "ave", "nw"]}', status=200,
+ content_type='application/json')
+
+ response = mapbox.Geocoder(access_token='pk.test').forward(
+ '1600 pennsylvania ave nw', languages=['en', 'de'])
+ assert response.status_code == 200
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@ac54c0dd453c61020f1f42eafdc3af34a7664718#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
polyline==1.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- platformdirs==2.4.0
- polyline==1.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_geocoder.py::test_geocoder_language"
]
| []
| [
"tests/test_geocoder.py::test_geocoder_default_name",
"tests/test_geocoder.py::test_geocoder_name",
"tests/test_geocoder.py::test_geocoder_forward",
"tests/test_geocoder.py::test_geocoder_forward_geojson",
"tests/test_geocoder.py::test_geocoder_reverse",
"tests/test_geocoder.py::test_geocoder_reverse_geojson",
"tests/test_geocoder.py::test_geocoder_place_types",
"tests/test_geocoder.py::test_validate_country_codes_err",
"tests/test_geocoder.py::test_validate_country",
"tests/test_geocoder.py::test_validate_place_types_err",
"tests/test_geocoder.py::test_validate_place_types",
"tests/test_geocoder.py::test_geocoder_forward_types",
"tests/test_geocoder.py::test_geocoder_reverse_types",
"tests/test_geocoder.py::test_geocoder_forward_proximity",
"tests/test_geocoder.py::test_geocoder_proximity_rounding",
"tests/test_geocoder.py::test_geocoder_forward_bbox",
"tests/test_geocoder.py::test_geocoder_forward_limit",
"tests/test_geocoder.py::test_geocoder_reverse_limit",
"tests/test_geocoder.py::test_geocoder_reverse_limit_requires_onetype",
"tests/test_geocoder.py::test_geocoder_reverse_rounding",
"tests/test_geocoder.py::test_geocoder_unicode",
"tests/test_geocoder.py::test_geocoder_forward_country"
]
| []
| MIT License | 1,406 | [
"README.rst",
"mapbox/services/geocoding.py"
]
| [
"README.rst",
"mapbox/services/geocoding.py"
]
|
|
mapbox__mapbox-sdk-py-193 | d503098e549834471e0857adf5163085af6b4355 | 2017-06-27 16:32:09 | d503098e549834471e0857adf5163085af6b4355 | diff --git a/mapbox/services/uploads.py b/mapbox/services/uploads.py
index b085cd5..86d49d4 100644
--- a/mapbox/services/uploads.py
+++ b/mapbox/services/uploads.py
@@ -2,6 +2,7 @@ from boto3.session import Session as boto3_session
from uritemplate import URITemplate
from mapbox.errors import InvalidFileError
+from mapbox.errors import ValidationError
from mapbox.services.base import Service
@@ -45,6 +46,16 @@ class Uploader(Service):
429: "Too many requests"})
return resp
+ def _validate_tileset(self, tileset):
+ """Validate the tileset name and
+ ensure that it includes the username
+ """
+ if '.' not in tileset:
+ tileset = "{0}.{1}".format(self.username, tileset)
+ if len(tileset) > 64:
+ raise ValidationError('tileset including username must be < 64 char')
+ return tileset
+
def stage(self, fileobj, creds=None, callback=None):
"""Stages the user's file on S3
If creds are not provided, temporary credientials will be generated
@@ -88,9 +99,7 @@ class Uploader(Service):
Returns a response object where the json() contents are
an upload dict
"""
- if '.' not in tileset:
- tileset = "{0}.{1}".format(self.username, tileset)
-
+ tileset = self._validate_tileset(tileset)
account, _name = tileset.split(".")
msg = {'tileset': tileset,
@@ -166,5 +175,6 @@ class Uploader(Service):
Effectively replicates the upload functionality using the HTML form
Returns a response object where the json() is a dict with upload metadata
"""
+ tileset = self._validate_tileset(tileset)
url = self.stage(fileobj, callback=callback)
return self.create(url, tileset, name=name, patch=patch)
| Raise exception for too-long tileset names
The Uploads API limits tileset names to 64 characters. Let's raise an exception before making an API call that will fail.
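A minimal sketch of the kind of guard this could be (names and the message are placeholders; I'm assuming the 64-character limit applies to the fully qualified `username.handle` id):

```python
from mapbox.errors import ValidationError

MAX_TILESET_LEN = 64  # limit enforced by the Uploads API

def validate_tileset(tileset, username):
    # prefix the account name if the caller passed a bare handle
    if '.' not in tileset:
        tileset = '{0}.{1}'.format(username, tileset)
    if len(tileset) > MAX_TILESET_LEN:
        raise ValidationError(
            'tileset id, including the username, must be 64 characters or fewer')
    return tileset
```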
cc: @geografa | mapbox/mapbox-sdk-py | diff --git a/tests/test_upload.py b/tests/test_upload.py
index 0111948..3bdbf99 100644
--- a/tests/test_upload.py
+++ b/tests/test_upload.py
@@ -385,3 +385,26 @@ def test_upload_patch(monkeypatch):
assert res.status_code == 201
job = res.json()
assert job['tileset'] == "{0}.test1".format(username)
+
+
+def test_upload_tileset_validation():
+ with pytest.raises(mapbox.errors.ValidationError):
+ with open('tests/moors.json', 'rb') as src:
+ mapbox.Uploader(access_token=access_token).upload(
+ src, 'a' * 65, name='test1', patch=True)
+
+
+def test_upload_tileset_validation_username():
+ # even with 60 chars, the addition of the
+ # testuser username puts it over 64 chars
+ with pytest.raises(mapbox.errors.ValidationError):
+ with open('tests/moors.json', 'rb') as src:
+ mapbox.Uploader(access_token=access_token).upload(
+ src, 'a' * 60, name='test1', patch=True)
+
+
+def test_create_tileset_validation():
+ # even with 60 chars, the addition of the username puts it over 64 chars
+ with pytest.raises(mapbox.errors.ValidationError):
+ mapbox.Uploader(access_token=access_token).create(
+ 'http://example.com/test.json', 'a' * 60)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.13 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [],
"pre_install": [],
"python": "3.6",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@d503098e549834471e0857adf5163085af6b4355#egg=mapbox
msgpack==1.0.5
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
polyline==1.4.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp==3.6.0
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- polyline==1.4.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_upload.py::test_create_tileset_validation"
]
| []
| [
"tests/test_upload.py::test_get_credentials",
"tests/test_upload.py::test_create",
"tests/test_upload.py::test_create_name",
"tests/test_upload.py::test_list",
"tests/test_upload.py::test_status",
"tests/test_upload.py::test_delete",
"tests/test_upload.py::test_stage",
"tests/test_upload.py::test_big_stage",
"tests/test_upload.py::test_upload",
"tests/test_upload.py::test_upload_error",
"tests/test_upload.py::test_invalid_fileobj",
"tests/test_upload.py::test_upload_patch",
"tests/test_upload.py::test_upload_tileset_validation",
"tests/test_upload.py::test_upload_tileset_validation_username"
]
| []
| MIT License | 1,407 | [
"mapbox/services/uploads.py"
]
| [
"mapbox/services/uploads.py"
]
|
|
palantir__python-language-server-63 | 8c038c38106bae4d04faf88fa53755e263cfd586 | 2017-06-27 19:20:58 | ac405e6ff8d886bc79d7e47b1104b10f2383f4bc | diff --git a/pyls/__main__.py b/pyls/__main__.py
index fe9f564..b88ae07 100644
--- a/pyls/__main__.py
+++ b/pyls/__main__.py
@@ -53,4 +53,29 @@ def main():
if args.tcp:
language_server.start_tcp_lang_server(args.host, args.port, PythonLanguageServer)
else:
- language_server.start_io_lang_server(sys.stdin, sys.stdout, PythonLanguageServer)
+ stdin, stdout = _binary_stdio()
+ language_server.start_io_lang_server(stdin, stdout, PythonLanguageServer)
+
+
+def _binary_stdio():
+ """Construct binary stdio streams (not text mode).
+
+ This seems to be different for Window/Unix Python2/3, so going by:
+ https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin
+ """
+ PY3K = sys.version_info >= (3, 0)
+
+ if PY3K:
+ stdin, stdout = sys.stdin.buffer, sys.stdout.buffer
+ else:
+ # Python 2 on Windows opens sys.stdin in text mode, and
+ # binary data that read from it becomes corrupted on \r\n
+ if sys.platform == "win32":
+ # set sys.stdin to binary mode
+ import os
+ import msvcrt
+ msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ stdin, stdout = sys.stdin, sys.stdout
+
+ return stdin, stdout
diff --git a/pyls/server.py b/pyls/server.py
index 391ca99..9e3fcac 100644
--- a/pyls/server.py
+++ b/pyls/server.py
@@ -49,8 +49,8 @@ class JSONRPCServer(object):
self._write_message(req.data)
def _content_length(self, line):
- if line.startswith("Content-Length: "):
- _, value = line.split("Content-Length: ")
+ if line.startswith(b'Content-Length: '):
+ _, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
@@ -83,5 +83,5 @@ class JSONRPCServer(object):
"Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
"{}".format(content_length, body)
)
- self.wfile.write(response)
+ self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
| Newline issues on Windows
Hi there,
Thanks for the excellent work you've done on the language server! Overall, it's working great on OSX and Linux - I'm working on incorporating it into Oni: https://github.com/extr0py/oni
There is one blocking issue on Windows, however:
**Issue:** The LSP protocol expects there to be `\r\n\r\n` following the Content-Header. This works as expected on OSX and Linux. However, on Windows, we actually get `\r\r\n\r\r\n`, which some LSP clients will not handle. `vscode-jsonrpc` is strict on matching this and therefore never realizes the stream is complete, and never completes initialization. I've tested this using the _stdio_ strategy - I haven't validated with the _tcp_ strategy.
**Defect:** It turns out Python has some 'magic' behavior with newlines - see https://stackoverflow.com/questions/2536545/how-to-write-unix-end-of-line-characters-in-windows-using-python. It looks like Python is converting `\n` on Windows to `os.linesep` - which is `\r\n`.
This is the impacted code (`server.py`):
```
def _write_message(self, msg):
body = json.dumps(msg, separators=(",", ":"))
content_length = len(body)
response = (
"Content-Length: {}\r\n"
"Content-Type: application/vscode-jsonrpc; charset=utf8\r\n\r\n"
"{}".format(content_length, body)
)
self.wfile.write(response)
self.wfile.flush()
```
In this case, the `\n` in the `\r\n` is getting expanded - so we actually end up with `\r\r\n`.
**Proposed Fix**: The `os.linesep` should be checked. If it is `\n`, we should use `\r\n` as the line ending to conform to the protocol. If it is `\r\n`, that means `\n` will be expanded to `\r\n`, so we should use `\n` as the terminator above.
I'm not an expert in Python, so there may be a cleaner way to fix this. It looks like there are options for how newlines are handled when reading from a file, so maybe there is a similar option to set when writing to the output. I'm also not sure whether this would cause problems with the tcp server.
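For example, one way to sidestep the newline translation entirely (instead of special-casing `os.linesep`) would be to write to binary streams and encode explicitly; a rough, untested sketch:

```python
import sys

def binary_stdio():
    # Python 3 exposes untranslated byte streams directly
    if sys.version_info >= (3, 0):
        return sys.stdin.buffer, sys.stdout.buffer
    if sys.platform == 'win32':
        # Python 2 on Windows opens stdio in text mode and expands \n to \r\n
        import os
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    return sys.stdin, sys.stdout

stdin, stdout = binary_stdio()
# _write_message would then do: stdout.write(response.encode('utf-8'))
```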
Let me know if this isn't clear - happy to give more info. | palantir/python-language-server | diff --git a/test/test_language_server.py b/test/test_language_server.py
index b1a68a5..7f0de1b 100644
--- a/test/test_language_server.py
+++ b/test/test_language_server.py
@@ -28,12 +28,12 @@ def client_server():
scr, scw = os.pipe()
server = Thread(target=start_io_lang_server, args=(
- os.fdopen(csr), os.fdopen(scw, 'w'), PythonLanguageServer
+ os.fdopen(csr, 'rb'), os.fdopen(scw, 'wb'), PythonLanguageServer
))
server.daemon = True
server.start()
- client = JSONRPCClient(os.fdopen(scr), os.fdopen(csw, 'w'))
+ client = JSONRPCClient(os.fdopen(scr, 'rb'), os.fdopen(csw, 'wb'))
yield client, server
@@ -95,10 +95,10 @@ def test_linting(client_server):
def _get_notification(client):
- request = jsonrpc.jsonrpc.JSONRPCRequest.from_json(client._read_message())
+ request = jsonrpc.jsonrpc.JSONRPCRequest.from_json(client._read_message().decode('utf-8'))
assert request.is_notification
return request.data
def _get_response(client):
- return json.loads(client._read_message())
+ return json.loads(client._read_message().decode('utf-8'))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@8c038c38106bae4d04faf88fa53755e263cfd586#egg=python_language_server
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/test_language_server.py::test_initialize",
"test/test_language_server.py::test_missing_message",
"test/test_language_server.py::test_linting"
]
| []
| [
"test/test_language_server.py::test_file_closed"
]
| []
| MIT License | 1,408 | [
"pyls/__main__.py",
"pyls/server.py"
]
| [
"pyls/__main__.py",
"pyls/server.py"
]
|
|
containers-tools__cct-107 | 27179b6004c7ec49335bc78c462a3918402856ce | 2017-06-27 19:45:20 | 27179b6004c7ec49335bc78c462a3918402856ce | diff --git a/cct/cfg.py b/cct/cfg.py
new file mode 100644
index 0000000..87ca19b
--- /dev/null
+++ b/cct/cfg.py
@@ -0,0 +1,2 @@
+dogen = False
+artifacts = []
diff --git a/cct/change_processor.py b/cct/change_processor.py
index d8b7025..a47ee4b 100644
--- a/cct/change_processor.py
+++ b/cct/change_processor.py
@@ -8,6 +8,7 @@ of the MIT license. See the LICENSE file for details.
import logging
+from cct import cfg
from cct.module import ModuleManager, ModuleRunner, Module
logger = logging.getLogger('cct')
@@ -21,10 +22,10 @@ class ChangeProcessor(object):
self.modules_dir = modules_dir
self.artifacts_dir = artifacts_dir
- def process(self, fetch_only=False):
+ def process(self):
logger.debug("processing change %s" % self.config)
for change in self.config:
- return self._process_change(change, fetch_only)
+ return self._process_change(change)
def _merge_environment(self, change_env, module_env):
if change_env is None:
@@ -44,7 +45,9 @@ class ChangeProcessor(object):
env_dict[variable] = env[variable]
return env_dict
- def _process_change(self, change_cfg, fetch_only):
+ def _process_change(self, change_cfg):
+ if 'name' not in change_cfg:
+ change_cfg['name'] = ''
logger.info("processing change %s" % change_cfg['name'])
change_env = self._create_env_dict(change_cfg.get('environment'))
if 'description' not in change_cfg:
@@ -58,18 +61,16 @@ class ChangeProcessor(object):
override = module['override'] if 'override' in module else False
mr.install_module(url, ver, override)
- if fetch_only:
- artifacts = {}
- for _, module in mr.modules.items():
- for _, artifact in module.artifacts.items():
- artifacts[artifact.filename] = artifact.hash
- return artifacts
+ if cfg.dogen:
+ return
steps = []
for modules in change_cfg['changes']:
for module_name, operations in modules.items():
if module_name not in mr.modules:
- raise Exception("Module %s cannot be found" % module_name)
+ msg = "Module %s cannot be found" % module_name
+ logger.error(msg)
+ raise Exception(msg)
module = Module(module_name, None, None)
module.instance = mr.modules[module_name]
module._update_env(change_env)
@@ -82,8 +83,6 @@ class ChangeProcessor(object):
runner.run()
except:
raise
- finally:
- runner.print_result_report()
class Change(object):
@@ -113,9 +112,3 @@ class ChangeRunner(object):
logger.error("module %s failed processing steps" % module.name)
self.results.append(module)
raise
-
- def print_result_report(self):
- for module in self.results:
- print("Processed module: %s" % module.name)
- for operation in module.operations:
- print(" %-30s: %s" % (operation.command, operation.state))
diff --git a/cct/cli/main.py b/cct/cli/main.py
index 223b8b2..35003a5 100755
--- a/cct/cli/main.py
+++ b/cct/cli/main.py
@@ -13,7 +13,8 @@ import os
import sys
import urllib2
-from cct import setup_logging, version
+
+from cct import setup_logging, version, cfg
from cct.change_processor import ChangeProcessor
from cct.module import ModuleManager
from urlparse import urlparse
@@ -32,7 +33,6 @@ class CCT_CLI(object):
self.parser = MyParser(description='Container configuration tool')
def setup_arguments(self):
- self.parser.add_argument('--fetch-only', action="store_true", help="cct will fetch modules and artifacts only")
self.parser.add_argument('--modules-dir', nargs='?', default="%s/%s" % (os.getcwd(), 'modules'), help='directory from where modules are executed')
self.parser.add_argument('--artifacts-dir', nargs='?', default="%s/%s" % (os.getcwd(), 'artifacts'), help='directory where artifacts are stored')
self.parser.add_argument('-v', '--verbose', action="store_true", help='verbose output')
@@ -59,10 +59,7 @@ class CCT_CLI(object):
stream = open(file, 'r')
return yaml.load(stream)
- def fetch_artifacts(self, changes, modules_dir, artifacts_dir):
- return self.process_changes(changes, modules_dir, artifacts_dir, True)
-
- def process_changes(self, changes, modules_dir, artifacts_dir, fetch_only):
+ def process_changes(self, changes, modules_dir, artifacts_dir):
for change in changes:
if change is '':
continue
@@ -72,7 +69,7 @@ class CCT_CLI(object):
else:
change = self.process_file(change)
cp = ChangeProcessor(change, modules_dir, artifacts_dir)
- return cp.process(fetch_only)
+ return cp.process()
def run(self):
self.setup_arguments()
@@ -110,7 +107,7 @@ class CCT_CLI(object):
else:
changes += args.changes
try:
- self.process_changes(changes, args.modules_dir, args.artifacts_dir, args.fetch_only)
+ self.process_changes(changes, args.modules_dir, args.artifacts_dir)
except KeyboardInterrupt:
pass
except Exception:
diff --git a/cct/module.py b/cct/module.py
index 2ab2434..5db6002 100644
--- a/cct/module.py
+++ b/cct/module.py
@@ -19,6 +19,7 @@ import subprocess
import traceback
import yaml
+from cct import cfg
from cct.errors import CCTError
from cct.lib.git import clone_repo, get_tag_or_branch
@@ -90,7 +91,8 @@ class ModuleManager(object):
self.check_module_sh(candidate)
elif 'script' in language:
for candidate in filter(lambda f: os.path.isdir(os.path.join(directory, f)), os.listdir(directory)):
- self.check_module_script(os.path.join(directory, candidate))
+ if candidate != '.git':
+ self.check_module_script(os.path.join(directory, candidate))
else:
pattern = os.path.join(os.path.abspath(directory), '*.py')
for candidate in glob.glob(pattern):
@@ -124,9 +126,8 @@ class ModuleManager(object):
self.modules[name].override = self.override
def check_module_script(self, candidate):
- module_name = "cct.module." + candidate.split('/')[-1]
- logger.debug("Importing module %s to %s" % (os.path.abspath(candidate), module_name))
- name = "%s.%s" % (os.path.dirname(candidate).split('/')[-1], module_name.split('.')[-1])
+ name = "%s.%s" % (os.path.dirname(candidate).split('/')[-1], candidate.split('/')[-1])
+ logger.debug("Importing script from %s to %s" % (os.path.abspath(candidate), name))
self.modules[name] = ScriptModule(name, os.path.dirname(candidate), self.artifacts_dir)
self.modules[name].version = self.version
self.modules[name].override = self.override
@@ -174,7 +175,7 @@ class ModuleManager(object):
for method in dir(module):
if callable(getattr(module, method)):
- if method[0] in string.ascii_lowercase and method not in ['run', 'setup', 'teardown']:
+ if method[0] in string.ascii_lowercase and method not in ['setup', 'teardown']:
print(" %s: %s" % (method, getattr(module, method).__doc__))
if getattr(module, "teardown").__doc__:
@@ -211,11 +212,11 @@ class ModuleRunner(object):
def run(self):
self.module.instance.setup()
for operation in self.module.operations:
- if operation.command in ['setup', 'run', 'teardown']:
+ if operation.command in ['setup', 'teardown']:
continue
if operation.command == 'user':
logger.info("setting uid to %s" % operation.args[0])
- self.module.uid=operation.args[0]
+ self.module.uid = operation.args[0]
continue
self.module.instance._process_environment(operation)
try:
@@ -237,6 +238,7 @@ class ModuleRunner(object):
class Module(object):
artifacts = {}
+ modules_dirs = {}
def __init__(self, name, directory, artifacts_dir, version=None):
self.name = name
@@ -251,6 +253,7 @@ class Module(object):
self.uid = os.getuid()
if not directory:
return
+ self.modules_dirs[os.path.splitext(name)[0]] = directory
with open(os.path.join(directory, "module.yaml")) as stream:
config = yaml.load(stream)
if 'artifacts' in config:
@@ -297,9 +300,13 @@ class Module(object):
def _get_artifacts(self, artifacts, destination):
for artifact in artifacts:
- cct_artifact = CctArtifact(**artifact)
- cct_artifact.fetch(destination)
- self.artifacts[cct_artifact.name] = cct_artifact
+ if cfg.dogen:
+ if artifact not in cfg.artifacts:
+ cfg.artifacts.append(artifact)
+ else:
+ cct_artifact = CctArtifact(**artifact)
+ cct_artifact.fetch(destination)
+ self.artifacts[cct_artifact.name] = cct_artifact
def setup(self):
pass
@@ -345,13 +352,15 @@ class CctArtifact(object):
"""
Object representing artifact file for changes
name - name of the file
- md5sum - md5sum
+ md5 - hash of artifact
+ sha256 - hash of artifact
+ sha1 - has of artifact
"""
- def __init__(self, name, chksum, artifact="", hint=""):
+ def __init__(self, name, md5=None, sha1=None, sha256=None, artifact="", hint=""):
self.name = name
- self.chksum = chksum
- self.alg = chksum.split(':')[0]
- self.hash = chksum.split(':')[1]
+ self.sums = {'sha1': sha1,
+ 'sha256': sha256,
+ 'md5': md5}
self.artifact = self.replace_variables(artifact) if '$' in artifact else artifact
self.filename = os.path.basename(artifact)
self.path = None
@@ -364,16 +373,6 @@ class CctArtifact(object):
self.path = os.path.join(directory, self.filename)
url = self.artifact
- if 'CCT_ARTIFACT_CACHE' in os.environ:
- cache = os.environ['CCT_ARTIFACT_CACHE']
- logger.info('Using CCT_ARTIFACT_CACHE=%s to fetch artifact' % cache)
- for var in [v for v in dir(self) if not callable(getattr(self, v))]:
- if var.startswith('_'):
- continue
- token = '#%s#' % var
- cache = cache.replace(token, getattr(self, var))
- url = cache
-
if self.check_sum():
logger.info("Using cached artifact for %s" % self.filename)
return
@@ -394,18 +393,19 @@ class CctArtifact(object):
if self.hint:
raise CCTError('hash is not correct for artifact: "%s". %s' % (self.path, self.hint))
else:
- raise CCTError("artifact from %s doesn't match required chksum %s" % (url, self.chksum))
+ raise CCTError("artifact from %s doesn't match required chksum" % url)
def check_sum(self):
if not os.path.exists(self.path):
return False
- hash = getattr(hashlib, self.chksum[:self.chksum.index(':')])()
- with open(self.path, "rb") as f:
- for block in iter(lambda: f.read(65536), b""):
- hash.update(block)
- if self.chksum[self.chksum.index(':') + 1:] == hash.hexdigest():
- return True
- return False
+ for alg, sum in self.sums.items():
+ hash = getattr(hashlib, alg)()
+ with open(self.path, "rb") as f:
+ for block in iter(lambda: f.read(65536), b""):
+ hash.update(block)
+ if sum is not None and sum != hash.hexdigest():
+ return False
+ return True
def replace_variables(self, string):
var_regex = re.compile('\$\{.*\}')
@@ -433,7 +433,6 @@ class Operation(object):
class ScriptModule(Module):
- modules_dirs = {}
def __init__(self, name, directory, artifacts_dir):
Module.__init__(self, name, directory, artifacts_dir)
@@ -442,7 +441,6 @@ class ScriptModule(Module):
self.names = {}
for script in filter(lambda f: os.path.isfile(f), glob.glob(pattern)):
self.names[os.path.basename(script).replace('-', '_').replace('.', '_')] = script
- self.modules_dirs[os.path.splitext(name)[0]] = os.path.dirname(script)
def __getattr__(self, name):
def wrapper(*args, **kwargs):
@@ -456,7 +454,7 @@ class ScriptModule(Module):
cmd = 'bash -x %s %s' % (script, " ".join(args))
try:
env = dict(os.environ)
- mod_dir = os.path.dirname(script)
+ mod_dir = os.path.dirname(os.path.dirname(script))
env['CCT_MODULE_PATH'] = mod_dir
logger.info('Created CCT_MODULE_PATH environment variable for module %s' % mod_dir)
diff --git a/os.yaml b/os.yaml
deleted file mode 100644
index ad7d120..0000000
--- a/os.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-- name: openshift.eap.setup
- description: execute steps for eap
- environment:
- - foo: gfoo
- - bar: gbar
- changes:
- - openshift:
- - setup: /tmp/install/
- - setup_jolokia: version=1.3.2.redhat-1
- - add_custom_launch_script:
- - setup_liveness_probe:
- - setup_readiness_probe:
- - setup_logging:
- - add_custom_configfile:
- - add_amq_rar:
- - inject_maven_settings:
- - link_java_db_drivers:
- - remove_console:
- - setup_jdk: version=1.8.0-openjdk
- - setup_s2i:
- - add_openshift_ping: version=1.0.0.Beta7-redhat-1
- - setup_deployment_dir:
diff --git a/samples/dummy.yaml b/samples/dummy.yaml
index 996f9c5..6d4d034 100644
--- a/samples/dummy.yaml
+++ b/samples/dummy.yaml
@@ -1,8 +1,9 @@
- name: samples.jboss_cli
description: execute samples jboss_cli steps
+ modules:
+ - url: https://github.com/containers-tools/base
changes:
- base.Dummy:
- dump: data-source remove --name=ExampleDS
- base.Shell:
- - url: https://github.com/containers-tools/base
- shell: ls -l
diff --git a/samples/eap_xml.yaml b/samples/eap_xml.yaml
deleted file mode 100644
index 22af030..0000000
--- a/samples/eap_xml.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-- name: samples.jboss_cli
- description: execute samples jboss_cli steps
- changes:
- - base.File:
- - copy: standalone.xml standalone-openshift.xml
- - copy: source=standalone.xml destination=standalone2.xml
- - base.XML:
- - setup: standalone-openshift.xml
- - insert: >
- xpath=/ns:server/ns:extensions
- snippet='<extension module="org.jboss.ctt.dummy" />'
diff --git a/samples/git_standalone_xml.yaml b/samples/git_standalone_xml.yaml
deleted file mode 100644
index 8925fda..0000000
--- a/samples/git_standalone_xml.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-- name: samples.jboss.config
- description: clone repository with standalone.xml and use it for wildky
- changes:
- - git:
- - checkout: https://github.com/l-d-j/cct-config-files path=cct-config-files
- - file:
- - move: /opt/jboss/wildfly/standalone/configuration/standalone.xml /opt/jboss/wildfly/standalone/configuration/standalone.xml.bkp
- - move: cct-config-files/wildfly/8/standalone.xml /opt/jboss/wildfly/standalone/configuration/standalone.xml
\ No newline at end of file
diff --git a/samples/jboss_cli.yaml b/samples/jboss_cli.yaml
deleted file mode 100644
index 1dfbbf7..0000000
--- a/samples/jboss_cli.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: samples.jboss_cli
- description: execute samples jboss_cli steps
- changes:
- - jboss.Cli:
- # set jboss home - uses $JBOSS_HOME by default
- - setup:
- - run_cli: data-source remove --name=ExampleDS
diff --git a/samples/wildfly_log_debug.yaml b/samples/wildfly_log_debug.yaml
deleted file mode 100644
index 3296a82..0000000
--- a/samples/wildfly_log_debug.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-- name: samples.xmledit
- description: change console logging to debug
- changes:
- - base.XML:
- - setup: /opt/jboss/wildfly/standalone/configuration/standalone.xml namespaces=ns:urn:jboss:domain:3.0|lns:urn:jboss:domain:logging:3.0
- - replace_attribute: >
- xpath=/ns:server/ns:profile/lns:subsystem/lns:console-handler/lns:level
- attribute=name
- value=DEBUG
| CCT_MODULE_PATH variable is wrongly set in script modules
this variable and the CCT_MODULE_PATH_NAME should point to the directory with image.yaml | containers-tools/cct | diff --git a/tests/test_change_processor.py b/tests/test_change_processor.py
index d9e9a85..54a4fc4 100755
--- a/tests/test_change_processor.py
+++ b/tests/test_change_processor.py
@@ -50,7 +50,7 @@ class TestModule(unittest.TestCase):
}]
destination = tempfile.mkdtemp()
changeProcessor = ChangeProcessor(config, destination, '/tmp/')
- changeProcessor.process(fetch_only=False)
+ changeProcessor.process()
shutil.rmtree(destination)
if __name__ == '__main__':
diff --git a/tests/test_unit_module.py b/tests/test_unit_module.py
index b40659b..de1fb30 100644
--- a/tests/test_unit_module.py
+++ b/tests/test_unit_module.py
@@ -45,7 +45,7 @@ class TestModules(unittest.TestCase):
"artifacts": [
{
"artifact": url,
- "chksum": chksum,
+ "md5": chksum,
"name": "cct",
}
]
@@ -56,18 +56,18 @@ class TestModules(unittest.TestCase):
def test_artifacts_fetching(self):
url = "https://github.com/containers-tools/cct/archive/v0.2.0.zip"
- chksum = "md5:607468ba87490a2b8daa4b6a73168620"
+ chksum = "607468ba87490a2b8daa4b6a73168620"
self.get_artifact(url, chksum)
def test_artifacts_fetching_wrong_url(self):
url = "https://github.com/containers-tools/cct/archive/0.0.1.zip33"
- chksum = "md5:must_be_wrong_too"
+ chksum = "must_be_wrong_too"
with self.assertRaises(CCTError):
self.get_artifact(url, chksum)
def test_artifacts_fetching_wrong_chksum(self):
url = "https://github.com/containers-tools/cct/archive/0.2.0.zip"
- chksum = "md5:foo"
+ chksum = "foo"
with self.assertRaises(CCTError):
self.get_artifact(url, chksum)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y python3-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/containers-tools/cct.git@27179b6004c7ec49335bc78c462a3918402856ce#egg=cct
coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
tomli==2.2.1
| name: cct
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- tomli==2.2.1
prefix: /opt/conda/envs/cct
| [
"tests/test_unit_module.py::TestModules::test_artifacts_fetching_wrong_chksum",
"tests/test_unit_module.py::TestModules::test_artifacts_fetching_wrong_url"
]
| [
"tests/test_change_processor.py::TestModule::test_fetch_modules",
"tests/test_change_processor.py::TestModule::test_process_int_values",
"tests/test_change_processor.py::TestModule::test_process_string_values",
"tests/test_unit_module.py::TestModules::test_artifacts_fetching",
"tests/test_unit_module.py::TestModules::test_module_script",
"tests/test_unit_module.py::TestModules::test_module_version_conflict",
"tests/test_unit_module.py::TestModules::test_module_version_override",
"tests/test_unit_module.py::TestModules::test_moudule_deps"
]
| [
"tests/test_unit_module.py::TestModules::test_find_modules",
"tests/test_unit_module.py::TestModules::test_module_getenv",
"tests/test_unit_module.py::TestModules::test_module_getenv_form_host",
"tests/test_unit_module.py::TestModules::test_module_getenv_none",
"tests/test_unit_module.py::TestModules::test_module_getenv_override"
]
| []
| MIT License | 1,409 | [
"cct/change_processor.py",
"cct/cli/main.py",
"cct/cfg.py",
"samples/wildfly_log_debug.yaml",
"samples/eap_xml.yaml",
"samples/dummy.yaml",
"samples/git_standalone_xml.yaml",
"samples/jboss_cli.yaml",
"os.yaml",
"cct/module.py"
]
| [
"cct/change_processor.py",
"cct/cli/main.py",
"cct/cfg.py",
"samples/wildfly_log_debug.yaml",
"samples/eap_xml.yaml",
"samples/dummy.yaml",
"samples/git_standalone_xml.yaml",
"samples/jboss_cli.yaml",
"os.yaml",
"cct/module.py"
]
|
|
Duke-GCB__DukeDSClient-144 | c6a1ad6afa8af78f858a59f7f6c129501a537b83 | 2017-06-27 20:01:25 | bffebebd86d09f5924461959401ef3698b4e47d5 | diff --git a/ddsc/core/ddsapi.py b/ddsc/core/ddsapi.py
index b02dc6b..1633145 100644
--- a/ddsc/core/ddsapi.py
+++ b/ddsc/core/ddsapi.py
@@ -238,41 +238,40 @@ class DataServiceApi(object):
resp = self.http.get(url, headers=headers, params=data_str)
return self._check_err(resp, url_suffix, data, allow_pagination=False)
- def _get_single_page(self, url_suffix, data, content_type, page_num):
+ def _get_single_page(self, url_suffix, data, page_num):
"""
Send GET request to API at url_suffix with post_data adding page and per_page parameters to
retrieve a single page. Always requests with per_page=DEFAULT_RESULTS_PER_PAGE.
:param url_suffix: str URL path we are sending a GET to
:param data: object data we are sending
- :param content_type: str from ContentType that determines how we format the data
:param page_num: int: page number to fetch
:return: requests.Response containing the result
"""
data_with_per_page = dict(data)
data_with_per_page['page'] = page_num
data_with_per_page['per_page'] = DEFAULT_RESULTS_PER_PAGE
- (url, data_str, headers) = self._url_parts(url_suffix, data_with_per_page, content_type=content_type)
+ (url, data_str, headers) = self._url_parts(url_suffix, data_with_per_page,
+ content_type=ContentType.form)
resp = self.http.get(url, headers=headers, params=data_str)
return self._check_err(resp, url_suffix, data, allow_pagination=True)
- def _get_collection(self, url_suffix, data, content_type=ContentType.json):
+ def _get_collection(self, url_suffix, data):
"""
Performs GET for all pages based on x-total-pages in first response headers.
Merges the json() 'results' arrays.
If x-total-pages is missing or 1 just returns the response without fetching multiple pages.
:param url_suffix: str URL path we are sending a GET to
:param data: object data we are sending
- :param content_type: str from ContentType that determines how we format the data
:return: requests.Response containing the result
"""
- response = self._get_single_page(url_suffix, data, content_type, page_num=1)
+ response = self._get_single_page(url_suffix, data, page_num=1)
total_pages_str = response.headers.get('x-total-pages')
if total_pages_str:
total_pages = int(total_pages_str)
if total_pages > 1:
multi_response = MultiJSONResponse(base_response=response, merge_array_field_name="results")
for page in range(2, total_pages + 1):
- additional_response = self._get_single_page(url_suffix, data, content_type, page_num=page)
+ additional_response = self._get_single_page(url_suffix, data, page_num=page)
multi_response.add_response(additional_response)
return multi_response
return response
@@ -393,7 +392,7 @@ class DataServiceApi(object):
if name_contains is not None:
data['name_contains'] = name_contains
url_prefix = "/{}/{}/children".format(parent_name, parent_id)
- return self._get_collection(url_prefix, data, content_type=ContentType.form)
+ return self._get_collection(url_prefix, data)
def create_upload(self, project_id, filename, content_type, size,
hash_value, hash_alg):
@@ -525,7 +524,7 @@ class DataServiceApi(object):
data = {
"full_name_contains": full_name,
}
- return self._get_collection('/users', data, content_type=ContentType.form)
+ return self._get_collection('/users', data)
def get_all_users(self):
"""
@@ -533,7 +532,7 @@ class DataServiceApi(object):
:return: requests.Response containing the successful result
"""
data = {}
- return self._get_collection('/users', data, content_type=ContentType.form)
+ return self._get_collection('/users', data)
def get_user_by_id(self, id):
"""
@@ -620,7 +619,7 @@ class DataServiceApi(object):
:param context: str which roles do we want 'project' or 'system'
:return: requests.Response containing the successful result
"""
- return self._get_collection("/auth_roles", {"context": context}, content_type=ContentType.form)
+ return self._get_collection("/auth_roles", {"context": context})
def get_project_transfers(self, project_id):
"""
| DDSClient is creating multiple projects with the same name
Email report from David Corcoran:
DDS client is currently creating multiple projects with the same name. Can you please make it so that there is an error when this is attempted?
I also got the occasional error (see attached). It happened for a handful of the projects.

| Duke-GCB/DukeDSClient | diff --git a/ddsc/core/tests/test_ddsapi.py b/ddsc/core/tests/test_ddsapi.py
index 1715d3a..87ec866 100644
--- a/ddsc/core/tests/test_ddsapi.py
+++ b/ddsc/core/tests/test_ddsapi.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import
from unittest import TestCase
import requests
-from ddsc.core.ddsapi import MultiJSONResponse, DataServiceApi, ContentType, UNEXPECTED_PAGING_DATA_RECEIVED
+from ddsc.core.ddsapi import MultiJSONResponse, DataServiceApi, UNEXPECTED_PAGING_DATA_RECEIVED
from mock import MagicMock
@@ -55,7 +55,7 @@ class TestDataServiceApi(TestCase):
json_return_value={"results": [1, 2, 3]},
num_pages=1)]
api = DataServiceApi(auth=None, url="something.com/v1/", http=mock_requests)
- response = api._get_collection(url_suffix="users", data={}, content_type=ContentType.json)
+ response = api._get_collection(url_suffix="users", data={})
self.assertEqual([1, 2, 3], response.json()["results"])
call_args_list = mock_requests.get.call_args_list
self.assertEqual(1, len(call_args_list))
@@ -64,9 +64,10 @@ class TestDataServiceApi(TestCase):
first_param = call_args[0][0]
self.assertEqual('something.com/v1/users', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(1, dict_param['params']['page'])
def test_get_collection_two_pages(self):
mock_requests = MagicMock()
@@ -79,7 +80,7 @@ class TestDataServiceApi(TestCase):
num_pages=2)
]
api = DataServiceApi(auth=None, url="something.com/v1/", http=mock_requests)
- response = api._get_collection(url_suffix="projects", data={}, content_type=ContentType.json)
+ response = api._get_collection(url_suffix="projects", data={})
self.assertEqual([1, 2, 3, 4, 5], response.json()["results"])
call_args_list = mock_requests.get.call_args_list
self.assertEqual(2, len(call_args_list))
@@ -88,18 +89,19 @@ class TestDataServiceApi(TestCase):
first_param = call_args[0][0]
self.assertEqual('something.com/v1/projects', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(1, dict_param['params']['page'])
# Check second request
call_args = call_args_list[1]
first_param = call_args[0][0]
self.assertEqual('something.com/v1/projects', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
- self.assertIn('"page": 2', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(2, dict_param['params']['page'])
def test_get_collection_three_pages(self):
mock_requests = MagicMock()
@@ -115,7 +117,7 @@ class TestDataServiceApi(TestCase):
num_pages=3)
]
api = DataServiceApi(auth=None, url="something.com/v1/", http=mock_requests)
- response = api._get_collection(url_suffix="uploads", data={}, content_type=ContentType.json)
+ response = api._get_collection(url_suffix="uploads", data={})
self.assertEqual([1, 2, 3, 4, 5, 6, 7], response.json()["results"])
call_args_list = mock_requests.get.call_args_list
self.assertEqual(3, len(call_args_list))
@@ -124,27 +126,29 @@ class TestDataServiceApi(TestCase):
first_param = call_args[0][0]
self.assertEqual('something.com/v1/uploads', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(1, dict_param['params']['page'])
# Check second request
call_args = call_args_list[1]
first_param = call_args[0][0]
self.assertEqual('something.com/v1/uploads', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
- self.assertIn('"page": 2', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(2, dict_param['params']['page'])
+
# Check third request
call_args = call_args_list[2]
first_param = call_args[0][0]
self.assertEqual('something.com/v1/uploads', first_param)
dict_param = call_args[1]
- self.assertEqual('application/json', dict_param['headers']['Content-Type'])
+ self.assertEqual('application/x-www-form-urlencoded', dict_param['headers']['Content-Type'])
self.assertIn("DukeDSClient/", dict_param['headers']['User-Agent'])
- self.assertIn('"per_page": 100', dict_param['params'])
- self.assertIn('"page": 3', dict_param['params'])
+ self.assertEqual(100, dict_param['params']['per_page'])
+ self.assertEqual(3, dict_param['params']['page'])
def test_put_raises_error_on_paging_response(self):
mock_requests = MagicMock()
@@ -192,7 +196,7 @@ class TestDataServiceApi(TestCase):
fake_response_with_pages(status_code=200, json_return_value={"ok": True}, num_pages=3)
]
api = DataServiceApi(auth=None, url="something.com/v1/", http=mock_requests)
- resp = api._get_single_page(url_suffix='stuff', data={}, content_type=ContentType.json, page_num=1)
+ resp = api._get_single_page(url_suffix='stuff', data={}, page_num=1)
self.assertEqual(True, resp.json()['ok'])
def test_get_auth_providers(self):
@@ -346,3 +350,37 @@ class TestDataServiceApi(TestCase):
api = DataServiceApi(auth=None, url="something.com/v1/", http=None)
self.assertIsNotNone(api.http)
self.assertEqual(type(api.http), requests.sessions.Session)
+
+ def test_get_projects(self):
+ page1 = {
+ "results": [
+ {
+ "id": "1234"
+ }
+ ]
+ }
+ page2 = {
+ "results": [
+ {
+ "id": "1235"
+ }
+ ]
+ }
+ mock_requests = MagicMock()
+ mock_requests.get.side_effect = [
+ fake_response_with_pages(status_code=200, json_return_value=page1, num_pages=2),
+ fake_response_with_pages(status_code=200, json_return_value=page2, num_pages=2),
+ ]
+ api = DataServiceApi(auth=None, url="something.com/v1", http=mock_requests)
+ resp = api.get_projects()
+ self.assertEqual(2, len(resp.json()['results']))
+ self.assertEqual("1234", resp.json()['results'][0]['id'])
+ self.assertEqual("1235", resp.json()['results'][1]['id'])
+ self.assertEqual(2, mock_requests.get.call_count)
+ first_call_second_arg = mock_requests.get.call_args_list[0][1]
+ self.assertEqual('application/x-www-form-urlencoded', first_call_second_arg['headers']['Content-Type'])
+ self.assertEqual(100, first_call_second_arg['params']['per_page'])
+ self.assertEqual(1, first_call_second_arg['params']['page'])
+ second_call_second_arg = mock_requests.get.call_args_list[0][1]
+ self.assertEqual(100, second_call_second_arg['params']['per_page'])
+ self.assertEqual(1, second_call_second_arg['params']['page'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/Duke-GCB/DukeDSClient.git@c6a1ad6afa8af78f858a59f7f6c129501a537b83#egg=DukeDSClient
exceptiongroup==1.2.2
future==0.16.0
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytz==2025.2
PyYAML==3.12
requests==2.13.0
six==1.10.0
tomli==2.2.1
| name: DukeDSClient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==0.16.0
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytz==2025.2
- pyyaml==3.12
- requests==2.13.0
- six==1.10.0
- tomli==2.2.1
prefix: /opt/conda/envs/DukeDSClient
| [
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_collection_one_page",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_collection_three_pages",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_collection_two_pages",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_projects",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_single_page_works_on_paging_response"
]
| []
| [
"ddsc/core/tests/test_ddsapi.py::TestMultiJSONResponse::test_pass_through_works_with_one_response",
"ddsc/core/tests/test_ddsapi.py::TestMultiJSONResponse::test_pass_through_works_with_three_responses",
"ddsc/core/tests/test_ddsapi.py::TestMultiJSONResponse::test_pass_through_works_with_two_responses",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_auth_provider_add_user",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_constructor_creates_session_when_passed_none",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_delete_raises_error_on_paging_response",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_auth_providers",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_project_transfers",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_get_single_item_raises_error_on_paging_response",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_list_auth_roles",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_post_raises_error_on_paging_response",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_put_raises_error_on_paging_response",
"ddsc/core/tests/test_ddsapi.py::TestDataServiceApi::test_relations_methods"
]
| []
| MIT License | 1,410 | [
"ddsc/core/ddsapi.py"
]
| [
"ddsc/core/ddsapi.py"
]
|
|
jupyter__nbgrader-778 | 7b9b431e873d1b787f269373140f0de31636b06c | 2017-06-27 22:57:08 | ed23f4484b084451da5b691df28031f39b2ce9ca | diff --git a/nbgrader/apps/baseapp.py b/nbgrader/apps/baseapp.py
index ab115149..825cc8ae 100644
--- a/nbgrader/apps/baseapp.py
+++ b/nbgrader/apps/baseapp.py
@@ -211,17 +211,6 @@ class NbGrader(JupyterApp):
cfg.Exchange.merge(cfg.TransferApp)
del cfg.TransferApp
- if 'BaseNbConvertApp' in cfg:
- self.log.warning(
- "Use BaseConverter in config, not BaseNbConvertApp. Outdated config:\n%s",
- '\n'.join(
- 'BaseNbConvertApp.{key} = {value!r}'.format(key=key, value=value)
- for key, value in cfg.BaseNbConvertApp.items()
- )
- )
- cfg.BaseConverter.merge(cfg.BaseNbConvertApp)
- del cfg.BaseNbConvertApp
-
super(NbGrader, self)._load_config(cfg, **kwargs)
if self.coursedir:
self.coursedir._load_config(cfg)
diff --git a/nbgrader/apps/nbgraderapp.py b/nbgrader/apps/nbgraderapp.py
index 23e84b3e..c5c22f8c 100755
--- a/nbgrader/apps/nbgraderapp.py
+++ b/nbgrader/apps/nbgraderapp.py
@@ -15,7 +15,6 @@ from .. import preprocessors
from .. import plugins
from ..coursedir import CourseDirectory
from .. import exchange
-from .. import converters
from .baseapp import nbgrader_aliases, nbgrader_flags
from . import (
NbGrader,
@@ -267,12 +266,6 @@ class NbGraderApp(NbGrader):
if hasattr(ex, "class_traits") and ex.class_traits(config=True):
classes.append(ex)
- # include all the converters
- for ex_name in converters.__all__:
- ex = getattr(converters, ex_name)
- if hasattr(ex, "class_traits") and ex.class_traits(config=True):
- classes.append(ex)
-
return classes
@catch_config_error
diff --git a/nbgrader/exchange/release.py b/nbgrader/exchange/release.py
index 6d1c89ca..3760d84a 100644
--- a/nbgrader/exchange/release.py
+++ b/nbgrader/exchange/release.py
@@ -4,7 +4,7 @@ from stat import (
S_IRUSR, S_IWUSR, S_IXUSR,
S_IRGRP, S_IWGRP, S_IXGRP,
S_IROTH, S_IWOTH, S_IXOTH,
- S_ISGID
+ S_ISGID, ST_MODE
)
from traitlets import Bool
@@ -17,6 +17,30 @@ class ExchangeRelease(Exchange):
force = Bool(False, help="Force overwrite existing files in the exchange.").tag(config=True)
+ def ensure_root(self):
+ perms = S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IWGRP|S_IXGRP|S_IROTH|S_IWOTH|S_IXOTH
+
+ # if root doesn't exist, create it and set permissions
+ if not os.path.exists(self.root):
+ self.log.warning("Creating exchange directory: {}".format(self.root))
+ try:
+ os.makedirs(self.root)
+ os.chmod(self.root, perms)
+ except PermissionError:
+ self.fail("Could not create {}, permission denied.".format(self.root))
+
+ else:
+ old_perms = oct(os.stat(self.root)[ST_MODE] & 0o777)
+ new_perms = oct(perms & 0o777)
+ if old_perms != new_perms:
+ self.log.warning(
+ "Permissions for exchange directory ({}) are invalid, changing them from {} to {}".format(
+ self.root, old_perms, new_perms))
+ try:
+ os.chmod(self.root, perms)
+ except PermissionError:
+ self.fail("Could not change permissions of {}, permission denied.".format(self.root))
+
def init_src(self):
self.src_path = self.coursedir.format_path(self.coursedir.release_directory, '.', self.coursedir.assignment_id)
if not os.path.isdir(self.src_path):
| Make it clearer how to set permissions for the exchange directory
If the exchange directory hasn't been created when `nbgrader release` is run for the first time, an error occurs:
```
$ nbgrader release --Exchange.root=/tmp/exchange ps1
[ReleaseApp | WARNING] No nbgrader_config.py file found (rerun with --debug to see where nbgrader is looking)
[ReleaseApp | CRITICAL] Unwritable directory, please contact your instructor: /tmp/exchange
[ReleaseApp | ERROR] nbgrader release failed
```
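A manual workaround (a sketch, assuming a POSIX host) is to pre-create the exchange directory with open permissions before releasing — essentially what the `ensure_root()` method added in the patch above automates:

```python
# Sketch: pre-create the exchange directory with rwx for user, group, and other,
# matching the permissions that ensure_root() sets in the patch above.
import os
import stat

exchange = '/tmp/exchange'
perms = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO  # i.e. mode 0o777
if not os.path.exists(exchange):
    os.makedirs(exchange)
os.chmod(exchange, perms)
```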
That error message is confusing and not helpful for instructors. Instead, if the exchange directory doesn't exist, `nbgrader release` should create the directory for instructors and set it to have the correct permissions. | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_release.py b/nbgrader/tests/apps/test_nbgrader_release.py
index 93db81cb..0d8bf2dc 100644
--- a/nbgrader/tests/apps/test_nbgrader_release.py
+++ b/nbgrader/tests/apps/test_nbgrader_release.py
@@ -1,4 +1,6 @@
import os
+import shutil
+import stat
from os.path import join
from .. import run_nbgrader
@@ -55,3 +57,16 @@ class TestNbGraderRelease(BaseTestApp):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
self._release("--assignment=ps1", exchange)
assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ def test_no_exchange(self, exchange, course_dir):
+ shutil.rmtree(exchange)
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("--assignment=ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ def test_exchange_bad_perms(self, exchange, course_dir):
+ perms = stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|stat.S_IRGRP
+ os.chmod(exchange, perms)
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("--assignment=ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@7b9b431e873d1b787f269373140f0de31636b06c#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_exchange"
]
| []
| [
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_help",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_course_id",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_force_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release_with_assignment_flag",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_exchange_bad_perms"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,411 | [
"nbgrader/apps/nbgraderapp.py",
"nbgrader/apps/baseapp.py",
"nbgrader/exchange/release.py"
]
| [
"nbgrader/apps/nbgraderapp.py",
"nbgrader/apps/baseapp.py",
"nbgrader/exchange/release.py"
]
|
|
zhmcclient__python-zhmcclient-329 | b6f8b75700b71d27da0303047e3c237623bca38a | 2017-06-28 14:41:03 | 9f0ff2df0f65b934e2e75b1bfdf0635677416ac7 | coveralls:
[](https://coveralls.io/builds/12173391)
Coverage decreased (-0.2%) to 87.796% when pulling **f478d0563ac7476cfe45449ec6fc6a7d257922b8 on andy/improve-objectid-filter** into **7a96fc49c5f66a7f4c6a2236c4a6ef5d7ee412ca on master**.
coveralls:
[](https://coveralls.io/builds/12178874)
Coverage decreased (-0.2%) to 87.796% when pulling **83108127c8a840c250ee69b785bf174734e8d584 on andy/improve-objectid-filter** into **7a96fc49c5f66a7f4c6a2236c4a6ef5d7ee412ca on master**.
andy-maier: +1 | diff --git a/zhmcclient/_activation_profile.py b/zhmcclient/_activation_profile.py
index bc0e538..0734caa 100644
--- a/zhmcclient/_activation_profile.py
+++ b/zhmcclient/_activation_profile.py
@@ -104,6 +104,8 @@ class ActivationProfileManager(BaseManager):
resource_class=ActivationProfile,
session=cpc.manager.session,
parent=cpc,
+ base_uri='{}/{}-activation-profiles'.format(cpc.uri, profile_type),
+ oid_prop='name', # This is an exception!
uri_prop='element-uri',
name_prop='name',
query_props=query_props)
@@ -165,27 +167,32 @@ class ActivationProfileManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = self._profile_type + '-activation-profiles'
- uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
-
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = self._profile_type + '-activation-profiles'
+ uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
diff --git a/zhmcclient/_adapter.py b/zhmcclient/_adapter.py
index bff2f35..4f1e8c3 100644
--- a/zhmcclient/_adapter.py
+++ b/zhmcclient/_adapter.py
@@ -112,6 +112,8 @@ class AdapterManager(BaseManager):
resource_class=Adapter,
session=cpc.manager.session,
parent=cpc,
+ base_uri='/api/adapters',
+ oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@@ -160,27 +162,32 @@ class AdapterManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = 'adapters'
- uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
-
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = 'adapters'
+ uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
@@ -242,6 +249,22 @@ class Adapter(BaseResource):
(in this case, :class:`~zhmcclient.AdapterManager`).
"""
+ # Name of property for port URIs, dependent on adapter family
+ port_uris_prop_by_family = {
+ 'ficon': 'storage-port-uris',
+ 'osa': 'network-port-uris',
+ 'roce': 'network-port-uris',
+ 'hipersockets': 'network-port-uris',
+ }
+
+ # URI segment for port URIs, dependent on adapter family
+ port_uri_segment_by_family = {
+ 'ficon': 'storage-ports',
+ 'osa': 'network-ports',
+ 'roce': 'network-ports',
+ 'hipersockets': 'network-ports',
+ }
+
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.AdapterManager`):
@@ -259,6 +282,8 @@ class Adapter(BaseResource):
super(Adapter, self).__init__(manager, uri, name, properties)
# The manager objects for child resources (with lazy initialization):
self._ports = None
+ self._port_uris_prop = None
+ self._port_uri_segment = None
@property
def ports(self):
@@ -271,6 +296,39 @@ class Adapter(BaseResource):
self._ports = PortManager(self)
return self._ports
+ @property
+ def port_uris_prop(self):
+ """
+ :term:`string`: Name of adapter property that specifies the adapter
+ port URIs, or the empty string ('') for adapters without ports.
+
+ For example, 'network-port-uris' for a network adapter.
+ """
+ if self._port_uris_prop is None:
+ family = self.get_property('adapter-family')
+ try:
+ self._port_uris_prop = self.port_uris_prop_by_family[family]
+ except KeyError:
+ self._port_uris_prop = ''
+ return self._port_uris_prop
+
+ @property
+ def port_uri_segment(self):
+ """
+ :term:`string`: Adapter type specific URI segment for adapter port
+ URIs, or the empty string ('') for adapters without ports.
+
+ For example, 'network-ports' for a network adapter.
+ """
+ if self._port_uri_segment is None:
+ family = self.get_property('adapter-family')
+ try:
+ self._port_uri_segment = self.port_uri_segment_by_family[
+ family]
+ except KeyError:
+ self._port_uri_segment = ''
+ return self._port_uri_segment
+
@logged_api_call
def delete(self):
"""
diff --git a/zhmcclient/_cpc.py b/zhmcclient/_cpc.py
index 1713c7e..2428622 100644
--- a/zhmcclient/_cpc.py
+++ b/zhmcclient/_cpc.py
@@ -89,6 +89,8 @@ class CpcManager(BaseManager):
resource_class=Cpc,
session=client.session,
parent=None,
+ base_uri='/api/cpcs',
+ oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@@ -128,27 +130,32 @@ class CpcManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = 'cpcs'
- uri = '/api/{}{}'.format(resources_name, query_parms)
-
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = 'cpcs'
+ uri = '/api/{}{}'.format(resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
diff --git a/zhmcclient/_hba.py b/zhmcclient/_hba.py
index e2b7f1f..0bf0d63 100644
--- a/zhmcclient/_hba.py
+++ b/zhmcclient/_hba.py
@@ -61,6 +61,8 @@ class HbaManager(BaseManager):
resource_class=Hba,
session=partition.manager.session,
parent=partition,
+ base_uri='{}/hbas'.format(partition.uri),
+ oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=[],
diff --git a/zhmcclient/_lpar.py b/zhmcclient/_lpar.py
index 80df8f5..d794d39 100644
--- a/zhmcclient/_lpar.py
+++ b/zhmcclient/_lpar.py
@@ -72,6 +72,8 @@ class LparManager(BaseManager):
resource_class=Lpar,
session=cpc.manager.session,
parent=cpc,
+ base_uri='/api/logical-partitions',
+ oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@@ -120,27 +122,32 @@ class LparManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = 'logical-partitions'
- uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
-
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = 'logical-partitions'
+ uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
diff --git a/zhmcclient/_manager.py b/zhmcclient/_manager.py
index 5b6ccf2..8a11ac5 100644
--- a/zhmcclient/_manager.py
+++ b/zhmcclient/_manager.py
@@ -34,7 +34,7 @@ import warnings
from requests.utils import quote
from ._logging import get_logger, logged_api_call
-from ._exceptions import NotFound, NoUniqueMatch
+from ._exceptions import NotFound, NoUniqueMatch, HTTPError
from ._utils import repr_list
__all__ = ['BaseManager']
@@ -182,8 +182,9 @@ class BaseManager(object):
documented and may change incompatibly.
"""
- def __init__(self, resource_class, session, parent, uri_prop, name_prop,
- query_props, list_has_name=True):
+ def __init__(self, resource_class, session, parent, base_uri,
+ oid_prop, uri_prop, name_prop, query_props,
+ list_has_name=True):
# This method intentionally has no docstring, because it is internal.
#
# Parameters:
@@ -197,6 +198,16 @@ class BaseManager(object):
# Parent resource defining the scope for this manager.
# `None`, if the manager has no parent, i.e. when it manages
# top-level resources (e.g. CPC).
+ # base_uri (string):
+ # Base URI of the resources of this manager. The base URI has no
+ # trailing slash and becomes the resource URI by appending '/' and
+ # the value of the property specified in 'oid_prop'.
+ # Must not be `None`.
+ # oid_prop (string):
+ # Name of the resource property whose value is appended to the
+ # base URI to form the resource URI (e.g. 'object-id' or
+ # 'element-id').
+ # Must not be `None`.
# uri_prop (string):
# Name of the resource property that is the canonical URI path of
# the resource (e.g. 'object-uri' or 'element-uri').
@@ -222,6 +233,8 @@ class BaseManager(object):
# so we test those that are not surfaced through the init code:
assert resource_class is not None
assert session is not None
+ assert base_uri is not None
+ assert oid_prop is not None
assert uri_prop is not None
assert name_prop is not None
assert query_props is not None
@@ -229,6 +242,8 @@ class BaseManager(object):
self._resource_class = resource_class
self._session = session
self._parent = parent
+ self._base_uri = base_uri
+ self._oid_prop = oid_prop
self._uri_prop = uri_prop
self._name_prop = name_prop
self._query_props = query_props
@@ -247,6 +262,8 @@ class BaseManager(object):
" _resource_class = {_resource_class!r}\n"
" _session = {_session_classname} at 0x{_session_id:08x}\n"
" _parent = {_parent_classname} at 0x{_parent_id:08x}\n"
+ " _base_uri = {_base_uri!r}\n"
+ " _oid_prop = {_oid_prop!r}\n"
" _uri_prop = {_uri_prop!r}\n"
" _name_prop = {_name_prop!r}\n"
" _query_props = {_query_props}\n"
@@ -260,6 +277,8 @@ class BaseManager(object):
_session_id=id(self._session),
_parent_classname=self._parent.__class__.__name__,
_parent_id=id(self._parent),
+ _base_uri=self._base_uri,
+ _oid_prop=self._oid_prop,
_uri_prop=self._uri_prop,
_name_prop=self._name_prop,
_query_props=repr_list(self._query_props, indent=2),
@@ -300,6 +319,59 @@ class BaseManager(object):
"""
self._name_uri_cache.invalidate()
+ def _try_optimized_lookup(self, filter_args):
+ """
+ Try to optimize the lookup by checking whether the filter arguments
+ specify the property that is used as the last segment in the resource
+ URI, with a plain string as match value (i.e. not using regular
+ expression matching).
+
+ If so, the lookup is performed by constructing the resource URI from
+ the specified filter argument, and by issuing a Get Properties
+ operation on that URI, returning a single resource object.
+
+ Parameters:
+
+ filter_args (dict):
+ Filter arguments. For details, see :ref:`Filtering`.
+
+ Returns:
+
+ resource object, or `None` if the optimization was not possible.
+ """
+ if filter_args is not None and \
+ len(filter_args) == 1 and \
+ self._oid_prop in filter_args:
+
+ oid_match = filter_args[self._oid_prop]
+ if re.match(r'^[a-zA-Z0-9_\-]+$', oid_match):
+ # The match string is a plain string (not a reg.expression)
+
+ # Construct the resource URI from the filter property
+ # and issue a Get <Resource> Properties on that URI
+
+ uri = self._base_uri + '/' + oid_match
+
+ try:
+ props = self.session.get(uri)
+ except HTTPError as exc:
+ if exc.http_status == 404 and exc.reason == 1:
+ # No such resource
+ return None
+ raise
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ resource_obj._full_properties = True
+
+ return resource_obj
+
+ return None
+
def _divide_filter_args(self, filter_args):
"""
Divide the filter arguments into filter query parameters for filtering
diff --git a/zhmcclient/_nic.py b/zhmcclient/_nic.py
index 63a7c39..480b11d 100644
--- a/zhmcclient/_nic.py
+++ b/zhmcclient/_nic.py
@@ -62,6 +62,8 @@ class NicManager(BaseManager):
resource_class=Nic,
session=partition.manager.session,
parent=partition,
+ base_uri='{}/nics'.format(partition.uri),
+ oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=[],
diff --git a/zhmcclient/_partition.py b/zhmcclient/_partition.py
index f29b1b9..a66ae40 100644
--- a/zhmcclient/_partition.py
+++ b/zhmcclient/_partition.py
@@ -81,6 +81,8 @@ class PartitionManager(BaseManager):
resource_class=Partition,
session=cpc.manager.session,
parent=cpc,
+ base_uri='/api/partitions',
+ oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@@ -130,27 +132,34 @@ class PartitionManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = 'partitions'
- uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = 'partitions'
+ uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
diff --git a/zhmcclient/_port.py b/zhmcclient/_port.py
index f670077..1cfccc0 100644
--- a/zhmcclient/_port.py
+++ b/zhmcclient/_port.py
@@ -56,6 +56,11 @@ class PortManager(BaseManager):
resource_class=Port,
session=adapter.manager.session,
parent=adapter,
+ base_uri='',
+ # TODO: Re-enable the following when unit/test_hba.py has been
+ # converted to using the zhmcclient mock support:
+ # base_uri='{}/{}'.format(adapter.uri, adapter.port_uri_segment),
+ oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=[],
@@ -74,6 +79,8 @@ class PortManager(BaseManager):
"""
List the Ports of this Adapter.
+ If the adapter does not have any ports, an empty list is returned.
+
Authorization requirements:
* Object-access permission to this Adapter.
@@ -104,35 +111,32 @@ class PortManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
+ uris_prop = self.adapter.port_uris_prop
+ if not uris_prop:
+ # Adapter does not have any ports
+ return []
+
+ uris = self.adapter.get_property(uris_prop)
+ assert uris is not None
+
+ # TODO: Remove the following circumvention once fixed.
+ # The following line circumvents a bug for FCP adapters that sometimes
+ # causes duplicate URIs to show up in this property:
+ uris = list(set(uris))
+
resource_obj_list = []
- storage_family = ('ficon')
- network_family = ('osa', 'roce', 'hipersockets')
- if self.adapter.get_property('adapter-family') in storage_family:
- uris = self.adapter.get_property('storage-port-uris')
-
- # TODO: Remove the following circumvention once fixed.
- # The following line circumvents a bug that sometimes causes
- # duplicate URIs to show up in this property, by reducing the
- # array to the unique URIs:
- uris = list(set(uris))
-
- elif self.adapter.get_property('adapter-family') in network_family:
- uris = self.adapter.get_property('network-port-uris')
- else:
- return resource_obj_list
- if uris:
- for uri in uris:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=uri,
- name=None,
- properties=None)
-
- if self._matches_filters(resource_obj, filter_args):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ for uri in uris:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=uri,
+ name=None,
+ properties=None)
+
+ if self._matches_filters(resource_obj, filter_args):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
diff --git a/zhmcclient/_virtual_function.py b/zhmcclient/_virtual_function.py
index 81beecd..a4738d5 100644
--- a/zhmcclient/_virtual_function.py
+++ b/zhmcclient/_virtual_function.py
@@ -60,6 +60,8 @@ class VirtualFunctionManager(BaseManager):
resource_class=VirtualFunction,
session=partition.manager.session,
parent=partition,
+ base_uri='{}/virtual-functions'.format(partition.uri),
+ oid_prop='element-id',
uri_prop='element-uri',
name_prop='name',
query_props=[],
diff --git a/zhmcclient/_virtual_switch.py b/zhmcclient/_virtual_switch.py
index df8695a..7efbd74 100644
--- a/zhmcclient/_virtual_switch.py
+++ b/zhmcclient/_virtual_switch.py
@@ -72,6 +72,8 @@ class VirtualSwitchManager(BaseManager):
resource_class=VirtualSwitch,
session=cpc.manager.session,
parent=cpc,
+ base_uri='/api/virtual-switches',
+ oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@@ -121,27 +123,32 @@ class VirtualSwitchManager(BaseManager):
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
- query_parms, client_filters = self._divide_filter_args(filter_args)
-
- resources_name = 'virtual-switches'
- uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
-
resource_obj_list = []
- result = self.session.get(uri)
- if result:
- props_list = result[resources_name]
- for props in props_list:
-
- resource_obj = self.resource_class(
- manager=self,
- uri=props[self._uri_prop],
- name=props.get(self._name_prop, None),
- properties=props)
-
- if self._matches_filters(resource_obj, client_filters):
- resource_obj_list.append(resource_obj)
- if full_properties:
- resource_obj.pull_full_properties()
+ resource_obj = self._try_optimized_lookup(filter_args)
+ if resource_obj:
+ resource_obj_list.append(resource_obj)
+ # It already has full properties
+ else:
+ query_parms, client_filters = self._divide_filter_args(filter_args)
+
+ resources_name = 'virtual-switches'
+ uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
+
+ result = self.session.get(uri)
+ if result:
+ props_list = result[resources_name]
+ for props in props_list:
+
+ resource_obj = self.resource_class(
+ manager=self,
+ uri=props[self._uri_prop],
+ name=props.get(self._name_prop, None),
+ properties=props)
+
+ if self._matches_filters(resource_obj, client_filters):
+ resource_obj_list.append(resource_obj)
+ if full_properties:
+ resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
| Performance issue with "find()" when filtering on "object-id"
Does the "find" method iterate over all items on the client side?
For example
cpc.adapters.find(**{'object-id': 'xxxxxxxx'})
If it does iterate over items, then the DPM API should be used to filter on "object-id" for better performance.
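For reference, the patch above addresses this by giving each manager a `base_uri` and an `oid_prop`, so that a filter containing only the id property can be resolved with a single GET instead of listing every resource. The sketch below only illustrates that idea; the helper and attribute names are assumptions, not the actual `_try_optimized_lookup` internals.
```python
def try_optimized_lookup(manager, filter_args):
    """Resolve a filter on the id property with a single GET, if possible.

    Illustrative sketch only: the `_oid_prop` and `_base_uri` attribute
    names are assumptions, not the real zhmcclient implementation.
    """
    if not filter_args or set(filter_args) != {manager._oid_prop}:
        return None  # fall back to list() plus client-side filtering
    uri = '{}/{}'.format(manager._base_uri.rstrip('/'),
                         filter_args[manager._oid_prop])
    try:
        props = manager.session.get(uri)  # one request instead of listing all
    except Exception:  # e.g. the resource does not exist
        return None
    return manager.resource_class(
        manager=manager,
        uri=props[manager._uri_prop],
        name=props.get(manager._name_prop),
        properties=props)
```
A real implementation would catch only the "not found" error from the HMC and fall back to the generic list path for any other filter combination.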
| zhmcclient/python-zhmcclient | diff --git a/tests/unit/test_adapter.py b/tests/unit/test_adapter.py
index 98c1518..e126499 100755
--- a/tests/unit/test_adapter.py
+++ b/tests/unit/test_adapter.py
@@ -276,7 +276,7 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'name': self.osa1_name + 'foo',
+ 'name': self.osa1_name + '_notfound',
}
adapters = self.cpc.adapters.list(filter_args=filter_args)
@@ -304,7 +304,7 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'object-id': self.osa1_id + 'foo',
+ 'object-id': self.osa1_id + '_notfound',
}
adapters = self.cpc.adapters.list(filter_args=filter_args)
@@ -336,7 +336,7 @@ class AdapterTests(unittest.TestCase):
self.add_standard_hipersocket()
filter_args = {
'name': self.osa1_name,
- 'object-id': self.osa1_id + 'foo',
+ 'object-id': self.osa1_id + '_notfound',
}
adapters = self.cpc.adapters.list(filter_args=filter_args)
@@ -350,7 +350,7 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'name': self.osa1_name + 'foo',
+ 'name': self.osa1_name + '_notfound',
'object-id': self.osa1_id,
}
@@ -365,8 +365,8 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'name': self.osa1_name + 'foo',
- 'object-id': self.osa1_id + 'foo',
+ 'name': self.osa1_name + '_notfound',
+ 'object-id': self.osa1_id + '_notfound',
}
adapters = self.cpc.adapters.list(filter_args=filter_args)
@@ -379,11 +379,9 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'name': [self.osa1_name, self.hs2_name + 'foo'],
+ 'name': [self.osa1_name, self.hs2_name + '_notfound'],
}
- # import pdb; pdb.set_trace()
-
adapters = self.cpc.adapters.list(filter_args=filter_args)
self.assertEqual(len(adapters), 1)
@@ -396,7 +394,7 @@ class AdapterTests(unittest.TestCase):
self.add_standard_osa()
self.add_standard_hipersocket()
filter_args = {
- 'name': [self.hs2_name + 'foo', self.osa1_name],
+ 'name': [self.hs2_name + '_notfound', self.osa1_name],
}
adapters = self.cpc.adapters.list(filter_args=filter_args)
diff --git a/tests/unit/test_manager.py b/tests/unit/test_manager.py
index 2e23d15..54085aa 100755
--- a/tests/unit/test_manager.py
+++ b/tests/unit/test_manager.py
@@ -55,6 +55,8 @@ class MyManager(BaseManager):
resource_class=MyResource,
session=session,
parent=None, # a top-level resource
+ base_uri='/api/myresources/',
+ oid_prop='fake_object_id',
uri_prop='fake_uri_prop',
name_prop='fake_name_prop',
query_props=[])
@@ -165,6 +167,8 @@ class Manager1Tests(unittest.TestCase):
resource_class=MyResource,
session=self.session,
parent=None, # a top-level resource
+ base_uri='/api/myresources/',
+ oid_prop='fake_object_id',
uri_prop='fake_uri_prop',
name_prop='fake_name_prop',
query_props=[])
diff --git a/tests/unit/test_port.py b/tests/unit/test_port.py
index 7219c21..ec0cbec 100755
--- a/tests/unit/test_port.py
+++ b/tests/unit/test_port.py
@@ -143,9 +143,7 @@ class PortTests(unittest.TestCase):
self.assertEqual(len(ports), len(uris))
for idx, port in enumerate(ports):
- self.assertEqual(
- port.properties['element-uri'],
- uris[idx])
+ self.assertTrue(port.properties['element-uri'] in uris)
self.assertFalse(port.full_properties)
self.assertEqual(port.manager, port_mgr)
diff --git a/tests/unit/test_resource.py b/tests/unit/test_resource.py
index 6cc62cc..110c4d3 100755
--- a/tests/unit/test_resource.py
+++ b/tests/unit/test_resource.py
@@ -52,6 +52,8 @@ class MyManager(BaseManager):
resource_class=MyResource,
session=session,
parent=None, # a top-level resource
+ base_uri='/api/myresources/',
+ oid_prop='fake_object_id',
uri_prop='fake-uri-prop',
name_prop='fake-name-prop',
query_props=['qp1', 'qp2'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 11
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt",
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
args==0.1.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
backports-abc==0.5
backports.functools-lru-cache==1.6.6
backports.shutil-get-terminal-size==1.0.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
click==8.0.4
click-repl==0.3.0
click-spinner==0.1.10
clint==0.5.1
colorama==0.4.5
comm==0.1.4
configparser==5.2.0
contextvars==2.4
coverage==6.2
cryptography==40.0.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docopt==0.6.2
docutils==0.18.1
entrypoints==0.4
enum34==1.1.10
flake8==3.9.2
gitdb==4.0.9
gitdb2==4.0.2
GitPython==3.1.18
html5lib==1.1
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
isort==5.10.1
jedi==0.17.2
jeepney==0.7.1
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
keyring==23.4.1
lazy-object-proxy==1.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mistune==0.8.4
mock==5.2.0
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nest-asyncio==1.6.0
nose==1.3.7
notebook==6.4.10
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pathlib2==2.3.7.post1
pbr==6.1.1
pexpect==4.9.0
pickleshare==0.7.5
pkginfo==1.10.0
pluggy==1.0.0
progressbar2==3.55.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycodestyle==2.7.0
pycparser==2.21
pyflakes==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-coveralls==2.9.3
python-dateutil==2.9.0.post0
python-utils==3.5.2
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
qtconsole==5.2.2
QtPy==2.0.1
readme-renderer==34.0
requests==2.27.1
requests-mock==1.12.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
Send2Trash==1.8.3
simplegeneric==0.8.1
singledispatch==3.7.0
six==1.17.0
smmap==5.0.0
smmap2==3.0.1
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-git==11.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
stomp.py==8.1.0
tabulate==0.8.10
terminado==0.12.1
testfixtures==7.2.2
testpath==0.6.0
tomli==1.2.3
tornado==6.1
tqdm==4.64.1
traitlets==4.3.3
twine==3.8.0
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
wrapt==1.16.0
-e git+https://github.com/zhmcclient/python-zhmcclient.git@b6f8b75700b71d27da0303047e3c237623bca38a#egg=zhmcclient
zipp==3.6.0
| name: python-zhmcclient
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- args==0.1.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- backports-abc==0.5
- backports-functools-lru-cache==1.6.6
- backports-shutil-get-terminal-size==1.0.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- click==8.0.4
- click-repl==0.3.0
- click-spinner==0.1.10
- clint==0.5.1
- colorama==0.4.5
- comm==0.1.4
- configparser==5.2.0
- contextvars==2.4
- coverage==6.2
- cryptography==40.0.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docopt==0.6.2
- docutils==0.18.1
- entrypoints==0.4
- enum34==1.1.10
- flake8==3.9.2
- gitdb==4.0.9
- gitdb2==4.0.2
- gitpython==3.1.18
- html5lib==1.1
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- isort==5.10.1
- jedi==0.17.2
- jeepney==0.7.1
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mistune==0.8.4
- mock==5.2.0
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nest-asyncio==1.6.0
- nose==1.3.7
- notebook==6.4.10
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pathlib2==2.3.7.post1
- pbr==6.1.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pkginfo==1.10.0
- pluggy==1.0.0
- progressbar2==3.55.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycodestyle==2.7.0
- pycparser==2.21
- pyflakes==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-coveralls==2.9.3
- python-dateutil==2.9.0.post0
- python-utils==3.5.2
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- qtconsole==5.2.2
- qtpy==2.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-mock==1.12.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- send2trash==1.8.3
- simplegeneric==0.8.1
- singledispatch==3.7.0
- six==1.17.0
- smmap==5.0.0
- smmap2==3.0.1
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-git==11.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stomp-py==8.1.0
- tabulate==0.8.10
- terminado==0.12.1
- testfixtures==7.2.2
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- tqdm==4.64.1
- traitlets==4.3.3
- twine==3.8.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/python-zhmcclient
| [
"tests/unit/test_manager.py::Manager1Tests::test_flush",
"tests/unit/test_manager.py::Manager1Tests::test_init_properties",
"tests/unit/test_manager.py::Manager1Tests::test_invalidate_cache",
"tests/unit/test_manager.py::Manager1Tests::test_list_not_implemented",
"tests/unit/test_manager.py::Manager1Tests::test_repr",
"tests/unit/test_manager.py::Manager2Tests::test_find_by_name_none",
"tests/unit/test_manager.py::Manager2Tests::test_find_by_name_one",
"tests/unit/test_manager.py::Manager2Tests::test_find_int_none",
"tests/unit/test_manager.py::Manager2Tests::test_find_int_one",
"tests/unit/test_manager.py::Manager2Tests::test_find_int_two",
"tests/unit/test_manager.py::Manager2Tests::test_find_name_none",
"tests/unit/test_manager.py::Manager2Tests::test_find_name_one",
"tests/unit/test_manager.py::Manager2Tests::test_find_str_none",
"tests/unit/test_manager.py::Manager2Tests::test_find_str_one",
"tests/unit/test_manager.py::Manager2Tests::test_find_str_two",
"tests/unit/test_manager.py::Manager2Tests::test_findall_int_none",
"tests/unit/test_manager.py::Manager2Tests::test_findall_int_one",
"tests/unit/test_manager.py::Manager2Tests::test_findall_int_two",
"tests/unit/test_manager.py::Manager2Tests::test_findall_name_none",
"tests/unit/test_manager.py::Manager2Tests::test_findall_name_one",
"tests/unit/test_manager.py::Manager2Tests::test_findall_str_none",
"tests/unit/test_manager.py::Manager2Tests::test_findall_str_one",
"tests/unit/test_manager.py::Manager2Tests::test_findall_str_one_and",
"tests/unit/test_manager.py::Manager2Tests::test_findall_str_two",
"tests/unit/test_manager.py::Manager2Tests::test_findall_str_two_or",
"tests/unit/test_manager.py::NameUriCacheTests::test_delete_existing",
"tests/unit/test_manager.py::NameUriCacheTests::test_delete_non_existing",
"tests/unit/test_manager.py::NameUriCacheTests::test_delete_none",
"tests/unit/test_manager.py::NameUriCacheTests::test_get_auto_invalidate",
"tests/unit/test_manager.py::NameUriCacheTests::test_get_manual_invalidate",
"tests/unit/test_manager.py::NameUriCacheTests::test_get_no_invalidate",
"tests/unit/test_manager.py::NameUriCacheTests::test_get_non_existing",
"tests/unit/test_manager.py::NameUriCacheTests::test_initial",
"tests/unit/test_manager.py::NameUriCacheTests::test_refresh_empty",
"tests/unit/test_manager.py::NameUriCacheTests::test_refresh_populated",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_empty",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_empty_empty",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_empty_none",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_from_empty",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_from_populated_modify_name",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_populated_modify",
"tests/unit/test_manager.py::NameUriCacheTests::test_update_populated_new",
"tests/unit/test_resource.py::InitTests::test_empty_name",
"tests/unit/test_resource.py::InitTests::test_empty_no_name",
"tests/unit/test_resource.py::InitTests::test_invalid_type",
"tests/unit/test_resource.py::InitTests::test_prop_case",
"tests/unit/test_resource.py::InitTests::test_repr",
"tests/unit/test_resource.py::InitTests::test_simple",
"tests/unit/test_resource.py::InitTests::test_str",
"tests/unit/test_resource.py::PropertySetTests::test_add_to_empty",
"tests/unit/test_resource.py::PropertySetTests::test_replace_one_add_one",
"tests/unit/test_resource.py::PropertyDelTests::test_clear",
"tests/unit/test_resource.py::PropertyDelTests::test_del_all_input",
"tests/unit/test_resource.py::PropertyDelTests::test_del_invalid",
"tests/unit/test_resource.py::PropertyDelTests::test_del_one",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_empty",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_none",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_integer_cf",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_integer_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_str_dash_name_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_str_reserved_name_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_str_reserved_val_cf",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_str_reserved_val_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_string_cf",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_one_string_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_two_cf_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_two_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_two_qp_cf",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_two_str_reserved_val_qp",
"tests/unit/test_resource.py::ManagerDivideFilterTests::test_two_two_qp"
]
| []
| [
"tests/unit/test_adapter.py::AdapterTests::test_manager_create_hipersocket",
"tests/unit/test_adapter.py::AdapterTests::test_manager_initial_attrs",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_default",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name2_found1",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name2_found2",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name2_found3",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_found1",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_found2",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_id_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_id_notfound1",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_id_notfound2",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_id_notfound3",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_notfound",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_reg1_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_reg2_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_reg3_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_reg4_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_name_reg5_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_oid_found",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_filter_oid_notfound",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_full",
"tests/unit/test_adapter.py::AdapterTests::test_manager_list_short",
"tests/unit/test_adapter.py::AdapterTests::test_resource_delete",
"tests/unit/test_adapter.py::AdapterTests::test_resource_repr",
"tests/unit/test_adapter.py::AdapterTests::test_resource_update_name",
"tests/unit/test_adapter.py::AdapterTests::test_resource_update_not_fetched",
"tests/unit/test_adapter.py::AdapterTests::test_resource_update_nothing",
"tests/unit/test_port.py::PortTests::test_init",
"tests/unit/test_port.py::PortTests::test_list_filter_elementid_ok",
"tests/unit/test_port.py::PortTests::test_list_filter_name_ok",
"tests/unit/test_port.py::PortTests::test_list_full_ok",
"tests/unit/test_port.py::PortTests::test_list_short_ok",
"tests/unit/test_port.py::PortTests::test_update_properties"
]
| []
| Apache License 2.0 | 1,412 | [
"zhmcclient/_nic.py",
"zhmcclient/_manager.py",
"zhmcclient/_virtual_switch.py",
"zhmcclient/_virtual_function.py",
"zhmcclient/_cpc.py",
"zhmcclient/_partition.py",
"zhmcclient/_adapter.py",
"zhmcclient/_activation_profile.py",
"zhmcclient/_lpar.py",
"zhmcclient/_hba.py",
"zhmcclient/_port.py"
]
| [
"zhmcclient/_nic.py",
"zhmcclient/_manager.py",
"zhmcclient/_virtual_switch.py",
"zhmcclient/_virtual_function.py",
"zhmcclient/_cpc.py",
"zhmcclient/_partition.py",
"zhmcclient/_adapter.py",
"zhmcclient/_activation_profile.py",
"zhmcclient/_lpar.py",
"zhmcclient/_hba.py",
"zhmcclient/_port.py"
]
|
celery__billiard-230 | bfe2dc6387853595ef8877809c1404fe4959519d | 2017-06-28 15:37:39 | 471d38e72df9cf380be90f89d0dde2735073b851 | Birne94: Python 2.7 tests (cpython und pypy) appear to fail for some unrelated issue, values might be broken on non Python 3.
`OSError: [Errno 17] File exists: '/tmp/pymp-JfdEz9/pym-2339-cp7Vsb'`
listingmirror: @Birne94 Any idea how this would interact with the "CELERY_WORKER_MAX_MEMORY_PER_CHILD" property? Like are internal properties set with a BufferedWrapper?
I just started using CELERY_WORKER_MAX_MEMORY_PER_CHILD, and I was confused what was going on. I run with 10 subprocesses. When any one of the 10 hit the limit, it seems like all 10 would get killed instead of just the one that used too much memory. Randomly trying out your branch to see what it does...
thedrow: The tests fail for Python 2.7 which is a blocker for merging this PR.
The test `test_issue_229` was added in this PR so it means that something is wrong with the current implementation. We could branch on Python 2/3 if needed but this needs to be fixed.
@Birne94 Can you please take a look? | diff --git a/CHANGES.txt b/CHANGES.txt
index 9e6c09e..e2366da 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,3 +1,16 @@
+3.5.0.3 - 2017-07-16
+--------------------
+
+- Adds Process._authkey alias to .authkey for 2.7 compat.
+- Remove superfluous else clause from max_memory_per_child_check.
+- Document and test all supported Python versions.
+- Extend 'Process' to be compatible with < Py3.5.
+- Use a properly initialized logger in pool.py error logging.
+- _trywaitkill can now kill a whole process group if the worker process declares itself as a group leader.
+- Fix cpython issue 14881 (See http://bugs.python.org/issue14881).
+- Fix for a crash on windows.
+- Fix messaging in case of worker exceeds max memory.
+
3.5.0.2 - 2016-10-03
--------------------
diff --git a/billiard/__init__.py b/billiard/__init__.py
index ada838a..9c7c92a 100644
--- a/billiard/__init__.py
+++ b/billiard/__init__.py
@@ -22,7 +22,7 @@ from __future__ import absolute_import
import sys
from . import context
-VERSION = (3, 5, 0, 2)
+VERSION = (3, 5, 0, 3)
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = '[email protected]'
diff --git a/billiard/heap.py b/billiard/heap.py
index 278bfe5..b7581ce 100644
--- a/billiard/heap.py
+++ b/billiard/heap.py
@@ -73,11 +73,12 @@ else:
self.size = size
self.fd = fd
if fd == -1:
- self.fd, name = tempfile.mkstemp(
- prefix='pym-%d-' % (os.getpid(), ),
- dir=util.get_temp_dir(),
- )
if PY3:
+ self.fd, name = tempfile.mkstemp(
+ prefix='pym-%d-' % (os.getpid(),),
+ dir=util.get_temp_dir(),
+ )
+
os.unlink(name)
util.Finalize(self, os.close, (self.fd,))
with io.open(self.fd, 'wb', closefd=False) as f:
@@ -90,6 +91,10 @@ else:
f.write(b'\0' * (size % bs))
assert f.tell() == size
else:
+ name = tempfile.mktemp(
+ prefix='pym-%d-' % (os.getpid(),),
+ dir=util.get_temp_dir(),
+ )
self.fd = os.open(
name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600,
)
diff --git a/billiard/pool.py b/billiard/pool.py
index 2734f2a..1bb9d10 100644
--- a/billiard/pool.py
+++ b/billiard/pool.py
@@ -47,7 +47,7 @@ from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
MAXMEM_USED_FMT = """\
-child process exiting after exceeding memory limit ({0}KiB / {0}KiB)
+child process exiting after exceeding memory limit ({0}KiB / {1}KiB)
"""
PY3 = sys.version_info[0] == 3
diff --git a/billiard/sharedctypes.py b/billiard/sharedctypes.py
index 6334661..97675df 100644
--- a/billiard/sharedctypes.py
+++ b/billiard/sharedctypes.py
@@ -155,7 +155,7 @@ def rebuild_ctype(type_, wrapper, length):
obj = type_.from_buffer(buf)
else:
obj = type_.from_address(wrapper.get_address())
-        obj._wrapper = wrapper
+    obj._wrapper = wrapper
return obj
#
| multiple billiard.Value instances sharing the same state
**Problem description**
When using multiple instances of `billiard.Value`, changing one variable's value changes the other one as well. In the example below, when I update the variable `value`, `done` is set to the same value too. When `billiard` is replaced with `multiprocessing`, the example works as expected.
**Example**
See [below](#issuecomment-311695221) for an easier example.
```python
import ctypes
import time
from billiard import Pool, Value
def foo():
for i in range(10):
value.value = i
time.sleep(1)
done.value = True
def bar():
while True:
print(value.value, '\t', done.value)
time.sleep(0.5)
if __name__ == '__main__':
value = Value(ctypes.c_int, 0)
done = Value(ctypes.c_bool, False)
with Pool() as pool:
pool.apply_async(bar)
pool.apply(foo)
```
**Expected output**
```
0 False
0 False
1 False
1 False
1 False
2 False
3 False
3 False
4 False
4 False
5 False
5 False
6 False
6 False
7 False
7 False
8 False
8 False
9 False
9 False
9 True
```
**Actual output**
```
0 False
0 False
1 True
1 True
2 True
2 True
3 True
3 True
4 True
4 True
5 True
5 True
6 True
6 True
7 True
7 True
8 True
8 True
9 True
9 True
```
**Versions**
```shell
% pip freeze | grep billiard
billiard==3.5.0.2
% python --version
Python 3.6.0
% uname -a
Darwin HOST 16.4.0 Darwin Kernel Version 16.4.0: Thu Dec 22 22:53:21 PST 2016; root:xnu-3789.41.3~3/RELEASE_X86_64 x86_64
% sw_vers -productVersion
10.12.3
``` | celery/billiard | diff --git a/t/unit/test_values.py b/t/unit/test_values.py
new file mode 100644
index 0000000..4b0bfc2
--- /dev/null
+++ b/t/unit/test_values.py
@@ -0,0 +1,77 @@
+from __future__ import absolute_import
+import pytest
+
+from billiard import Value, RawValue, Lock, Process
+
+
+class test_values:
+
+ codes_values = [
+ ('i', 4343, 24234),
+ ('d', 3.625, -4.25),
+ ('h', -232, 234),
+ ('c', 'x'.encode('latin'), 'y'.encode('latin'))
+ ]
+
+ def test_issue_229(self):
+ """Test fix for issue #229"""
+
+ a = Value('i', 0)
+ b = Value('i', 0)
+
+ a.value = 5
+ assert a.value == 5
+ assert b.value == 0
+
+ @classmethod
+ def _test(cls, values):
+ for sv, cv in zip(values, cls.codes_values):
+ sv.value = cv[2]
+
+ def test_value(self, raw=False):
+ if raw:
+ values = [RawValue(code, value)
+ for code, value, _ in self.codes_values]
+ else:
+ values = [Value(code, value)
+ for code, value, _ in self.codes_values]
+
+ for sv, cv in zip(values, self.codes_values):
+ assert sv.value == cv[1]
+
+ proc = Process(target=self._test, args=(values,))
+ proc.daemon = True
+ proc.start()
+ proc.join()
+
+ for sv, cv in zip(values, self.codes_values):
+ assert sv.value == cv[2]
+
+ def test_rawvalue(self):
+ self.test_value(raw=True)
+
+ def test_getobj_getlock(self):
+ val1 = Value('i', 5)
+ lock1 = val1.get_lock()
+ obj1 = val1.get_obj()
+
+ val2 = Value('i', 5, lock=None)
+ lock2 = val2.get_lock()
+ obj2 = val2.get_obj()
+
+ lock = Lock()
+ val3 = Value('i', 5, lock=lock)
+ lock3 = val3.get_lock()
+ obj3 = val3.get_obj()
+ assert lock == lock3
+
+ arr4 = Value('i', 5, lock=False)
+ assert not hasattr(arr4, 'get_lock')
+ assert not hasattr(arr4, 'get_obj')
+
+ with pytest.raises(AttributeError):
+ Value('i', 5, lock='navalue')
+
+ arr5 = RawValue('i', 5)
+ assert not hasattr(arr5, 'get_lock')
+ assert not hasattr(arr5, 'get_obj')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 5
} | 3.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"case"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/celery/billiard.git@bfe2dc6387853595ef8877809c1404fe4959519d#egg=billiard
case==1.5.3
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
linecache2==1.0.0
mock==5.2.0
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
six==1.17.0
tomli==1.2.3
traceback2==1.4.0
typing_extensions==4.1.1
unittest2==1.1.0
zipp==3.6.0
| name: billiard
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- attrs==22.2.0
- case==1.5.3
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- linecache2==1.0.0
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- six==1.17.0
- tomli==1.2.3
- traceback2==1.4.0
- typing-extensions==4.1.1
- unittest2==1.1.0
- zipp==3.6.0
prefix: /opt/conda/envs/billiard
| [
"t/unit/test_values.py::test_values::test_issue_229",
"t/unit/test_values.py::test_values::test_value",
"t/unit/test_values.py::test_values::test_rawvalue"
]
| []
| [
"t/unit/test_values.py::test_values::test_getobj_getlock"
]
| []
| BSD License | 1,413 | [
"CHANGES.txt",
"billiard/sharedctypes.py",
"billiard/pool.py",
"billiard/__init__.py",
"billiard/heap.py"
]
| [
"CHANGES.txt",
"billiard/sharedctypes.py",
"billiard/pool.py",
"billiard/__init__.py",
"billiard/heap.py"
]
|
cloudant__python-cloudant-306 | 56795a119f7f8b884b001f9b2f740d67d550ea41 | 2017-06-28 16:49:39 | eda73e429f404db7c22c1ccd4c265b5c70063dae | diff --git a/docs/getting_started.rst b/docs/getting_started.rst
index 17fe3e6..f4ce130 100644
--- a/docs/getting_started.rst
+++ b/docs/getting_started.rst
@@ -88,6 +88,30 @@ following statements hold true:
connect=True,
auto_renew=True)
+
+************************************
+Identity and Access Management (IAM)
+************************************
+
+IBM Cloud Identity & Access Management enables you to securely authenticate
+users and control access to all cloud resources consistently in the IBM Bluemix
+Cloud Platform.
+
+See `IBM Cloud Identity and Access Management <https://console.bluemix.net/docs/services/Cloudant/guides/iam.html#ibm-cloud-identity-and-access-management>`_
+for more information.
+
+The production IAM token service at *https://iam.bluemix.net/oidc/token* is used
+by default. You can set an ``IAM_TOKEN_URL`` environment variable to override
+this.
+
+You can easily connect to your Cloudant account using an IAM API key:
+
+.. code-block:: python
+
+ # Authenticate using an IAM API key
+ client = Cloudant.iam(ACCOUNT_NAME, API_KEY, connect=True)
+
+
****************
Resource sharing
****************
diff --git a/src/cloudant/_2to3.py b/src/cloudant/_2to3.py
index 2e52af9..5c7d412 100644
--- a/src/cloudant/_2to3.py
+++ b/src/cloudant/_2to3.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2016 IBM. All rights reserved.
+# Copyright (c) 2016, 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -39,7 +39,9 @@ if PY2:
# pylint: disable=wrong-import-position,no-name-in-module,import-error,unused-import
from urllib import quote as url_quote, quote_plus as url_quote_plus
from urlparse import urlparse as url_parse
+ from urlparse import urljoin as url_join
from ConfigParser import RawConfigParser
+ from cookielib import Cookie
def iteritems_(adict):
"""
@@ -60,9 +62,11 @@ if PY2:
return itr.next()
else:
from urllib.parse import urlparse as url_parse # pylint: disable=wrong-import-position,no-name-in-module,import-error,ungrouped-imports
+ from urllib.parse import urljoin as url_join # pylint: disable=wrong-import-position,no-name-in-module,import-error,ungrouped-imports
from urllib.parse import quote as url_quote # pylint: disable=wrong-import-position,no-name-in-module,import-error,ungrouped-imports
from urllib.parse import quote_plus as url_quote_plus # pylint: disable=wrong-import-position,no-name-in-module,import-error,ungrouped-imports
from configparser import RawConfigParser # pylint: disable=wrong-import-position,no-name-in-module,import-error
+ from http.cookiejar import Cookie # pylint: disable=wrong-import-position,no-name-in-module,import-error
def iteritems_(adict):
"""
diff --git a/src/cloudant/__init__.py b/src/cloudant/__init__.py
index 8cb9f75..04131db 100644
--- a/src/cloudant/__init__.py
+++ b/src/cloudant/__init__.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (c) 2015 IBM. All rights reserved.
+# Copyright (c) 2015, 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -62,6 +62,35 @@ def cloudant(user, passwd, **kwargs):
yield cloudant_session
cloudant_session.disconnect()
[email protected]
+def cloudant_iam(account_name, api_key, **kwargs):
+ """
+ Provides a context manager to create a Cloudant session using IAM
+ authentication and provide access to databases, docs etc.
+
+ :param account_name: Cloudant account name.
+ :param api_key: IAM authentication API key.
+
+ For example:
+
+ .. code-block:: python
+
+ # cloudant context manager
+ from cloudant import cloudant_iam
+
+ with cloudant_iam(ACCOUNT_NAME, API_KEY) as client:
+ # Context handles connect() and disconnect() for you.
+ # Perform library operations within this context. Such as:
+ print client.all_dbs()
+ # ...
+
+ """
+ cloudant_session = Cloudant.iam(account_name, api_key, **kwargs)
+
+ cloudant_session.connect()
+ yield cloudant_session
+ cloudant_session.disconnect()
+
@contextlib.contextmanager
def cloudant_bluemix(vcap_services, instance_name=None, **kwargs):
"""
diff --git a/src/cloudant/_common_util.py b/src/cloudant/_common_util.py
index 6dcf331..fe2e068 100644
--- a/src/cloudant/_common_util.py
+++ b/src/cloudant/_common_util.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (c) 2015, 2016, 2017 IBM Corp. All rights reserved.
+# Copyright (c) 2015, 2017 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,13 +17,14 @@ Module containing miscellaneous classes, functions, and constants used
throughout the library.
"""
+import os
import sys
import platform
from collections import Sequence
import json
-from requests import Session
+from requests import RequestException, Session
-from ._2to3 import LONGTYPE, STRTYPE, NONETYPE, UNITYPE, iteritems_, url_parse
+from ._2to3 import LONGTYPE, STRTYPE, NONETYPE, UNITYPE, iteritems_, url_join
from .error import CloudantArgumentError, CloudantException
# Library Constants
@@ -276,6 +277,7 @@ def append_response_error_content(response, **kwargs):
# Classes
+
class _Code(str):
"""
Wraps a ``str`` object as a _Code object providing the means to handle
@@ -287,66 +289,212 @@ class _Code(str):
return str.__new__(cls, code.encode('utf8'))
return str.__new__(cls, code)
-class InfiniteSession(Session):
+
+class ClientSession(Session):
+ """
+ This class extends Session and provides a default timeout.
+ """
+
+ def __init__(self, **kwargs):
+ super(ClientSession, self).__init__()
+ self._timeout = kwargs.get('timeout', None)
+
+ def request(self, method, url, **kwargs): # pylint: disable=W0221
+ """
+ Overrides ``requests.Session.request`` to set the timeout.
+ """
+ resp = super(ClientSession, self).request(
+ method, url, timeout=self._timeout, **kwargs)
+
+ return resp
+
+
+class CookieSession(ClientSession):
"""
- This class provides for the ability to automatically renew session login
- information in the event of expired session authentication.
+ This class extends ClientSession and provides cookie authentication.
"""
def __init__(self, username, password, server_url, **kwargs):
- super(InfiniteSession, self).__init__()
+ super(CookieSession, self).__init__(**kwargs)
self._username = username
self._password = password
- self._server_url = server_url
- self._timeout = kwargs.get('timeout', None)
+ self._auto_renew = kwargs.get('auto_renew', False)
+ self._session_url = url_join(server_url, '_session')
+
+ def info(self):
+ """
+ Get cookie based login user information.
+ """
+ resp = self.get(self._session_url)
+ resp.raise_for_status()
+
+ return resp.json()
+
+ def login(self):
+ """
+ Perform cookie based user login.
+ """
+ resp = super(CookieSession, self).request(
+ 'POST',
+ self._session_url,
+ data={'name': self._username, 'password': self._password},
+ )
+ resp.raise_for_status()
+
+ def logout(self):
+ """
+ Logout cookie based user.
+ """
+ resp = super(CookieSession, self).request('DELETE', self._session_url)
+ resp.raise_for_status()
def request(self, method, url, **kwargs): # pylint: disable=W0221
"""
- Overrides ``requests.Session.request`` to perform a POST to the
- _session endpoint to renew Session cookie authentication settings and
- then retry the original request, if necessary.
+ Overrides ``requests.Session.request`` to renew the cookie and then
+ retry the original request (if required).
"""
- resp = super(InfiniteSession, self).request(
- method, url, timeout=self._timeout, **kwargs)
- path = url_parse(url).path.lower()
- post_to_session = method.upper() == 'POST' and path == '/_session'
+ resp = super(CookieSession, self).request(method, url, **kwargs)
+
+ if not self._auto_renew:
+ return resp
+
is_expired = any((
resp.status_code == 403 and
resp.json().get('error') == 'credentials_expired',
resp.status_code == 401
))
- if not post_to_session and is_expired:
- super(InfiniteSession, self).request(
- 'POST',
- '/'.join([self._server_url, '_session']),
- data={'name': self._username, 'password': self._password},
- headers={'Content-Type': 'application/x-www-form-urlencoded'}
- )
- resp = super(InfiniteSession, self).request(
- method, url, timeout=self._timeout, **kwargs)
+
+ if is_expired:
+ self.login()
+ resp = super(CookieSession, self).request(method, url, **kwargs)
return resp
-class ClientSession(Session):
+ def set_credentials(self, username, password):
+ """
+ Set a new username and password.
+
+ :param str username: New username.
+ :param str password: New password.
+ """
+ if username is not None:
+ self._username = username
+
+ if password is not None:
+ self._password = password
+
+
+class IAMSession(ClientSession):
"""
- This class extends Session and provides a default timeout.
+ This class extends ClientSession and provides IAM authentication.
"""
- def __init__(self, username, password, server_url, **kwargs):
- super(ClientSession, self).__init__()
- self._username = username
- self._password = password
- self._server_url = server_url
- self._timeout = kwargs.get('timeout', None)
+ def __init__(self, api_key, server_url, **kwargs):
+ super(IAMSession, self).__init__(**kwargs)
+ self._api_key = api_key
+ self._auto_renew = kwargs.get('auto_renew', False)
+ self._session_url = url_join(server_url, '_iam_session')
+ self._token_url = os.environ.get(
+ 'IAM_TOKEN_URL', 'https://iam.bluemix.net/oidc/token')
+
+ def info(self):
+ """
+ Get IAM cookie based login user information.
+ """
+ resp = self.get(self._session_url)
+ resp.raise_for_status()
+
+ return resp.json()
+
+ def login(self):
+ """
+ Perform IAM cookie based user login.
+ """
+ access_token = self._get_access_token()
+ try:
+ super(IAMSession, self).request(
+ 'POST',
+ self._session_url,
+ headers={'Content-Type': 'application/json'},
+ data=json.dumps({'access_token': access_token})
+ ).raise_for_status()
+
+ except RequestException:
+ raise CloudantException(
+ 'Failed to exchange IAM token with Cloudant')
+
+ def logout(self):
+ """
+ Logout IAM cookie based user.
+ """
+ self.cookies.clear()
def request(self, method, url, **kwargs): # pylint: disable=W0221
"""
- Overrides ``requests.Session.request`` to set the timeout.
+ Overrides ``requests.Session.request`` to renew the IAM cookie
+ and then retry the original request (if required).
"""
- resp = super(ClientSession, self).request(
- method, url, timeout=self._timeout, **kwargs)
+ # The CookieJar API prevents callers from getting an individual Cookie
+ # object by name.
+ # We are forced to use the only exposed method of discarding expired
+ # cookies from the CookieJar. Internally this involves iterating over
+ # the entire CookieJar and calling `.is_expired()` on each Cookie
+ # object.
+ self.cookies.clear_expired_cookies()
+
+ if self._auto_renew and 'IAMSession' not in self.cookies.keys():
+ self.login()
+
+ resp = super(IAMSession, self).request(method, url, **kwargs)
+
+ if not self._auto_renew:
+ return resp
+
+ if resp.status_code == 401:
+ self.login()
+ resp = super(IAMSession, self).request(method, url, **kwargs)
+
return resp
+ def set_credentials(self, username, api_key):
+ """
+ Set a new IAM API key.
+
+ :param str username: Username parameter is unused.
+ :param str api_key: New IAM API key.
+ """
+ if api_key is not None:
+ self._api_key = api_key
+
+ def _get_access_token(self):
+ """
+ Get IAM access token using API key.
+ """
+ err = 'Failed to contact IAM token service'
+ try:
+ resp = super(IAMSession, self).request(
+ 'POST',
+ self._token_url,
+ auth=('bx', 'bx'), # required for user API keys
+ headers={'Accepts': 'application/json'},
+ data={
+ 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
+ 'response_type': 'cloud_iam',
+ 'apikey': self._api_key
+ }
+ )
+ err = resp.json().get('errorMessage', err)
+ resp.raise_for_status()
+
+ return resp.json()['access_token']
+
+ except KeyError:
+ raise CloudantException('Invalid response from IAM token service')
+
+ except RequestException:
+ raise CloudantException(err)
+
+
class CloudFoundryService(object):
""" Manages Cloud Foundry service configuration. """
diff --git a/src/cloudant/client.py b/src/cloudant/client.py
index 7c69c46..3a1360c 100755
--- a/src/cloudant/client.py
+++ b/src/cloudant/client.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2015, 2016, 2017 IBM Corp. All rights reserved.
+# Copyright (C) 2015, 2017 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -29,10 +29,10 @@ from .error import (
from ._common_util import (
USER_AGENT,
append_response_error_content,
- InfiniteSession,
ClientSession,
- CloudFoundryService)
-
+ CloudFoundryService,
+ CookieSession,
+ IAMSession)
class CouchDB(dict):
"""
@@ -67,6 +67,10 @@ class CouchDB(dict):
`Requests library timeout argument
<http://docs.python-requests.org/en/master/user/quickstart/#timeouts>`_.
but will apply to every request made using this client.
+ :param bool use_iam: Keyword argument, if set to True performs
+ IAM authentication with server. Default is False.
+ Use :func:`~cloudant.client.CouchDB.iam` to construct an IAM
+ authenticated client.
"""
_DATABASE_CLASS = CouchDatabase
@@ -74,7 +78,6 @@ class CouchDB(dict):
super(CouchDB, self).__init__()
self._user = user
self._auth_token = auth_token
- self._client_session = None
self.server_url = kwargs.get('url')
self._client_user_header = None
self.admin_party = admin_party
@@ -83,6 +86,7 @@ class CouchDB(dict):
self._timeout = kwargs.get('timeout', None)
self.r_session = None
self._auto_renew = kwargs.get('auto_renew', False)
+ self._use_iam = kwargs.get('use_iam', False)
connect_to_couch = kwargs.get('connect', False)
if connect_to_couch and self._DATABASE_CLASS == CouchDatabase:
self.connect()
@@ -93,29 +97,34 @@ class CouchDB(dict):
authentication if necessary.
"""
if self.r_session:
- return
+ self.session_logout()
- if self._auto_renew and not self.admin_party:
- self.r_session = InfiniteSession(
- self._user,
+ if self.admin_party:
+ self.r_session = ClientSession(timeout=self._timeout)
+ elif self._use_iam:
+ self.r_session = IAMSession(
self._auth_token,
self.server_url,
+ auto_renew=self._auto_renew,
timeout=self._timeout
)
else:
- self.r_session = ClientSession(
+ self.r_session = CookieSession(
self._user,
self._auth_token,
self.server_url,
+ auto_renew=self._auto_renew,
timeout=self._timeout
)
+
# If a Transport Adapter was supplied add it to the session
if self.adapter is not None:
self.r_session.mount(self.server_url, self.adapter)
if self._client_user_header is not None:
self.r_session.headers.update(self._client_user_header)
- self.session_login(self._user, self._auth_token)
- self._client_session = self.session()
+
+ self.session_login()
+
# Utilize an event hook to append to the response message
# using :func:`~cloudant.common_util.append_response_error_content`
self.r_session.hooks['response'].append(append_response_error_content)
@@ -124,7 +133,9 @@ class CouchDB(dict):
"""
Ends a client authentication session, performs a logout and a clean up.
"""
- self.session_logout()
+ if self.r_session:
+ self.session_logout()
+
self.r_session = None
self.clear()
@@ -137,11 +148,8 @@ class CouchDB(dict):
"""
if self.admin_party:
return None
- sess_url = '/'.join((self.server_url, '_session'))
- resp = self.r_session.get(sess_url)
- resp.raise_for_status()
- sess_data = resp.json()
- return sess_data
+
+ return self.r_session.info()
def session_cookie(self):
"""
@@ -153,26 +161,16 @@ class CouchDB(dict):
return None
return self.r_session.cookies.get('AuthSession')
- def session_login(self, user, passwd):
+ def session_login(self, user=None, passwd=None):
"""
Performs a session login by posting the auth information
to the _session endpoint.
-
- :param str user: Username used to connect.
- :param str passwd: Passcode used to connect.
"""
if self.admin_party:
return
- sess_url = '/'.join((self.server_url, '_session'))
- resp = self.r_session.post(
- sess_url,
- data={
- 'name': user,
- 'password': passwd
- },
- headers={'Content-Type': 'application/x-www-form-urlencoded'}
- )
- resp.raise_for_status()
+
+ self.r_session.set_credentials(user, passwd)
+ self.r_session.login()
def session_logout(self):
"""
@@ -181,9 +179,8 @@ class CouchDB(dict):
"""
if self.admin_party:
return
- sess_url = '/'.join((self.server_url, '_session'))
- resp = self.r_session.delete(sess_url)
- resp.raise_for_status()
+
+ self.r_session.logout()
def basic_auth_str(self):
"""
@@ -783,3 +780,18 @@ class Cloudant(CouchDB):
service.password,
url=service.url,
**kwargs)
+
+ @classmethod
+ def iam(cls, account_name, api_key, **kwargs):
+ """
+ Create a Cloudant client that uses IAM authentication.
+
+ :param account_name: Cloudant account name.
+ :param api_key: IAM authentication API key.
+ """
+ return cls(None,
+ api_key,
+ account=account_name,
+ auto_renew=kwargs.get('auto_renew', True),
+ use_iam=True,
+ **kwargs)
| Support IAM integration
Cloudant client libraries need to be able to exchange an IAM API key for an IAM access token. The user of the client library provides the API key. The access token is obtained by calling an IAM endpoint. The access token is time limited, like a cookie, so refresh logic also needs writing. | cloudant/python-cloudant | diff --git a/tests/unit/auth_renewal_tests.py b/tests/unit/auth_renewal_tests.py
index 799783d..3d9b7cc 100644
--- a/tests/unit/auth_renewal_tests.py
+++ b/tests/unit/auth_renewal_tests.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (c) 2016 IBM. All rights reserved.
+# Copyright (c) 2016, 2017 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,14 +23,14 @@ import os
import requests
import time
-from cloudant._common_util import InfiniteSession
+from cloudant._common_util import CookieSession
from .unit_t_db_base import UnitTestDbBase
@unittest.skipIf(os.environ.get('ADMIN_PARTY') == 'true', 'Skipping - Admin Party mode')
class AuthRenewalTests(UnitTestDbBase):
"""
- Auto renewal tests primarily testing the InfiniteSession functionality
+ Auto renewal tests primarily testing the CookieSession functionality
"""
def setUp(self):
@@ -62,10 +62,10 @@ class AuthRenewalTests(UnitTestDbBase):
db_2_auth_session = db_2.r_session.cookies.get('AuthSession')
doc_auth_session = doc.r_session.cookies.get('AuthSession')
- self.assertIsInstance(self.client.r_session, InfiniteSession)
- self.assertIsInstance(db.r_session, InfiniteSession)
- self.assertIsInstance(db_2.r_session, InfiniteSession)
- self.assertIsInstance(doc.r_session, InfiniteSession)
+ self.assertIsInstance(self.client.r_session, CookieSession)
+ self.assertIsInstance(db.r_session, CookieSession)
+ self.assertIsInstance(db_2.r_session, CookieSession)
+ self.assertIsInstance(doc.r_session, CookieSession)
self.assertIsNotNone(auth_session)
self.assertTrue(
auth_session ==
diff --git a/tests/unit/client_tests.py b/tests/unit/client_tests.py
index b2dbcbf..796e5fc 100644
--- a/tests/unit/client_tests.py
+++ b/tests/unit/client_tests.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (c) 2015, 2016, 2017 IBM Corp. All rights reserved.
+# Copyright (c) 2015, 2017 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,13 +28,14 @@ import sys
import os
import datetime
-from requests import ConnectTimeout
+from requests import ConnectTimeout, HTTPError
+from time import sleep
from cloudant import cloudant, cloudant_bluemix, couchdb, couchdb_admin_party
from cloudant.client import Cloudant, CouchDB
from cloudant.error import CloudantArgumentError, CloudantClientException
from cloudant.feed import Feed, InfiniteFeed
-from cloudant._common_util import InfiniteSession
+from cloudant._common_util import CookieSession
from .unit_t_db_base import UnitTestDbBase
from .. import bytes_, str_
@@ -163,7 +164,7 @@ class ClientTests(UnitTestDbBase):
def test_auto_renew_enabled(self):
"""
- Test that InfiniteSession is used when auto_renew is enabled.
+ Test that CookieSession is used when auto_renew is enabled.
"""
try:
self.set_up_client(auto_renew=True)
@@ -171,13 +172,13 @@ class ClientTests(UnitTestDbBase):
if os.environ.get('ADMIN_PARTY') == 'true':
self.assertIsInstance(self.client.r_session, requests.Session)
else:
- self.assertIsInstance(self.client.r_session, InfiniteSession)
+ self.assertIsInstance(self.client.r_session, CookieSession)
finally:
self.client.disconnect()
def test_auto_renew_enabled_with_auto_connect(self):
"""
- Test that InfiniteSession is used when auto_renew is enabled along with
+ Test that CookieSession is used when auto_renew is enabled along with
an auto_connect.
"""
try:
@@ -185,7 +186,7 @@ class ClientTests(UnitTestDbBase):
if os.environ.get('ADMIN_PARTY') == 'true':
self.assertIsInstance(self.client.r_session, requests.Session)
else:
- self.assertIsInstance(self.client.r_session, InfiniteSession)
+ self.assertIsInstance(self.client.r_session, CookieSession)
finally:
self.client.disconnect()
@@ -492,6 +493,30 @@ class CloudantClientTests(UnitTestDbBase):
Cloudant specific client unit tests
"""
+ def test_cloudant_session_login(self):
+ """
+ Test that the Cloudant client session successfully authenticates.
+ """
+ self.client.connect()
+ old_cookie = self.client.session_cookie()
+
+ sleep(5) # ensure we get a different cookie back
+
+ self.client.session_login()
+ self.assertNotEqual(self.client.session_cookie(), old_cookie)
+
+ def test_cloudant_session_login_with_new_credentials(self):
+ """
+ Test that the Cloudant client session fails to authenticate when
+ passed incorrect credentials.
+ """
+ self.client.connect()
+
+ with self.assertRaises(HTTPError) as cm:
+ self.client.session_login('invalid-user-123', 'pa$$w0rd01')
+
+ self.assertTrue(str(cm.exception).find('Name or password is incorrect'))
+
def test_cloudant_context_helper(self):
"""
Test that the cloudant context helper works as expected.
diff --git a/tests/unit/iam_auth_tests.py b/tests/unit/iam_auth_tests.py
new file mode 100644
index 0000000..a65d3f0
--- /dev/null
+++ b/tests/unit/iam_auth_tests.py
@@ -0,0 +1,352 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 IBM. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Unit tests for IAM authentication. """
+import time
+import unittest
+import json
+import mock
+
+from cloudant._2to3 import Cookie
+from cloudant._common_util import IAMSession
+from cloudant.client import Cloudant
+
+MOCK_API_KEY = 'CqbrIYzdO3btWV-5t4teJLY_etfT_dkccq-vO-5vCXSo'
+
+MOCK_ACCESS_TOKEN = ('eyJraWQiOiIyMDE3MDQwMi0wMDowMDowMCIsImFsZyI6IlJTMjU2In0.e'
+ 'yJpYW1faWQiOiJJQk1pZC0yNzAwMDdHRjBEIiwiaWQiOiJJQk1pZC0yNz'
+ 'AwMDdHRjBEIiwicmVhbG1pZCI6IklCTWlkIiwiaWRlbnRpZmllciI6IjI'
+ '3MDAwN0dGMEQiLCJnaXZlbl9uYW1lIjoiVG9tIiwiZmFtaWx5X25hbWUi'
+ 'OiJCbGVuY2giLCJuYW1lIjoiVG9tIEJsZW5jaCIsImVtYWlsIjoidGJsZ'
+ 'W5jaEB1ay5pYm0uY29tIiwic3ViIjoidGJsZW5jaEB1ay5pYm0uY29tIi'
+ 'wiYWNjb3VudCI6eyJic3MiOiI1ZTM1ZTZhMjlmYjJlZWNhNDAwYWU0YzN'
+ 'lMWZhY2Y2MSJ9LCJpYXQiOjE1MDA0NjcxMDIsImV4cCI6MTUwMDQ3MDcw'
+ 'MiwiaXNzIjoiaHR0cHM6Ly9pYW0ubmcuYmx1ZW1peC5uZXQvb2lkYy90b'
+ '2tlbiIsImdyYW50X3R5cGUiOiJ1cm46aWJtOnBhcmFtczpvYXV0aDpncm'
+ 'FudC10eXBlOmFwaWtleSIsInNjb3BlIjoib3BlbmlkIiwiY2xpZW50X2l'
+ 'kIjoiZGVmYXVsdCJ9.XAPdb5K4n2nYih-JWTWBGoKkxTXM31c1BB1g-Ci'
+ 'auc2LxuoNXVTyz_mNqf1zQL07FUde1Cb_dwrbotjickNcxVPost6byQzt'
+ 'fc0mRF1x2S6VR8tn7SGiRmXBjLofkTh1JQq-jutp2MS315XbTG6K6m16u'
+ 'YzL9qfMnRvQHxsZWErzfPiJx-Trg_j7OX-qNFjdNUGnRpU7FmULy0r7Rx'
+ 'Ld8mhG-M1yxVzRBAZzvM63s0XXfMnk1oLi-BuUUTqVOdrM0KyYMWfD0Q7'
+ '2PTo4Exa17V-R_73Nq8VPCwpOvZcwKRA2sPTVgTMzU34max8b5kpTzVGJ'
+ '6SXSItTVOUdAygZBng')
+
+MOCK_OIDC_TOKEN_RESPONSE = {
+ 'access_token': MOCK_ACCESS_TOKEN,
+ 'refresh_token': ('MO61FKNvVRWkSa4vmBZqYv_Jt1kkGMUc-XzTcNnR-GnIhVKXHUWxJVV3'
+ 'RddE8Kqh3X_TZRmyK8UySIWKxoJ2t6obUSUalPm90SBpTdoXtaljpNyo'
+ 'rmqCCYPROnk6JBym72ikSJqKHHEZVQkT0B5ggZCwPMnKagFj0ufs-VIh'
+ 'CF97xhDxDKcIPMWG02xxPuESaSTJJug7e_dUDoak_ZXm9xxBmOTRKwOx'
+ 'n5sTKthNyvVpEYPE7jIHeiRdVDOWhN5LomgCn3TqFCLpMErnqwgNYbyC'
+ 'Bd9rNm-alYKDb6Jle4njuIBpXxQPb4euDwLd1osApaSME3nEarFWqRBz'
+ 'hjoqCe1Kv564s_rY7qzD1nHGvKOdpSa0ZkMcfJ0LbXSQPs7gBTSVrBFZ'
+ 'qwlg-2F-U3Cto62-9qRR_cEu_K9ZyVwL4jWgOlngKmxV6Ku4L5mHp4Kg'
+ 'EJSnY_78_V2nm64E--i2ZA1FhiKwIVHDOivVNhggE9oabxg54vd63glp'
+ '4GfpNnmZsMOUYG9blJJpH4fDX4Ifjbw-iNBD7S2LRpP8b8vG9pb4WioG'
+ 'zN43lE5CysveKYWrQEZpThznxXlw1snDu_A48JiL3Lrvo1LobLhF3zFV'
+ '-kQ='),
+ 'token_type': 'Bearer',
+ 'expires_in': 3600, # 60mins
+ 'expiration': 1500470702 # Wed Jul 19 14:25:02 2017
+}
+
+
+class IAMAuthTests(unittest.TestCase):
+ """ Unit tests for IAM authentication. """
+
+ @staticmethod
+ def _mock_cookie(expires_secs=300):
+ return Cookie(
+ version=0,
+ name='IAMSession',
+ value=('SQJCaUQxMqEfMEAyRKU6UopLVXceS0c9RPuQgDArCEYoN3l_TEY4gdf-DJ7'
+ '4sHfjcNEUVjfdOvA'),
+ port=None,
+ port_specified=False,
+ domain='localhost',
+ domain_specified=False,
+ domain_initial_dot=False,
+ path="/",
+ path_specified=True,
+ secure=True,
+ expires=int(time.time() + expires_secs),
+ discard=False,
+ comment=None,
+ comment_url=None,
+ rest={'HttpOnly': None},
+ rfc2109=True)
+
+ def test_iam_set_credentials(self):
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984')
+ self.assertEquals(iam._api_key, MOCK_API_KEY)
+
+ new_api_key = 'some_new_api_key'
+ iam.set_credentials(None, new_api_key)
+
+ self.assertEquals(iam._api_key, new_api_key)
+
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_get_access_token(self, m_req):
+ m_response = mock.MagicMock()
+ m_response.json.return_value = MOCK_OIDC_TOKEN_RESPONSE
+ m_req.return_value = m_response
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984')
+ access_token = iam._get_access_token()
+
+ m_req.assert_called_once_with(
+ 'POST',
+ iam._token_url,
+ auth=('bx', 'bx'),
+ headers={'Accepts': 'application/json'},
+ data={
+ 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
+ 'response_type': 'cloud_iam',
+ 'apikey': MOCK_API_KEY
+ }
+ )
+
+ self.assertEqual(access_token, MOCK_ACCESS_TOKEN)
+ self.assertTrue(m_response.raise_for_status.called)
+ self.assertTrue(m_response.json.called)
+
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ @mock.patch('cloudant._common_util.IAMSession._get_access_token')
+ def test_iam_login(self, m_token, m_req):
+ m_token.return_value = MOCK_ACCESS_TOKEN
+ m_response = mock.MagicMock()
+ m_req.return_value = m_response
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984')
+ iam.login()
+
+ m_req.assert_called_once_with(
+ 'POST',
+ iam._session_url,
+ headers={'Content-Type': 'application/json'},
+ data=json.dumps({'access_token': MOCK_ACCESS_TOKEN})
+ )
+
+ self.assertEqual(m_token.call_count, 1)
+ self.assertTrue(m_response.raise_for_status.called)
+
+ def test_iam_logout(self):
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984')
+ # add a valid cookie to jar
+ iam.cookies.set_cookie(self._mock_cookie())
+ self.assertEqual(len(iam.cookies.keys()), 1)
+ iam.logout()
+ self.assertEqual(len(iam.cookies.keys()), 0)
+
+ @mock.patch('cloudant._common_util.ClientSession.get')
+ def test_iam_get_session_info(self, m_get):
+ m_info = {'ok': True, 'info': {'authentication_db': '_users'}}
+
+ m_response = mock.MagicMock()
+ m_response.json.return_value = m_info
+ m_get.return_value = m_response
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984')
+ info = iam.info()
+
+ m_get.assert_called_once_with(iam._session_url)
+
+ self.assertEqual(info, m_info)
+ self.assertTrue(m_response.raise_for_status.called)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_first_request(self, m_req, m_login):
+ # mock 200
+ m_response_ok = mock.MagicMock()
+ type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
+ m_response_ok.json.return_value = {'ok': True}
+
+ m_req.return_value = m_response_ok
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984', auto_renew=True)
+ iam.login()
+
+ self.assertEqual(m_login.call_count, 1)
+ self.assertEqual(m_req.call_count, 0)
+
+ # add a valid cookie to jar
+ iam.cookies.set_cookie(self._mock_cookie())
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+
+ self.assertEqual(m_login.call_count, 1)
+ self.assertEqual(m_req.call_count, 1)
+ self.assertEqual(resp.status_code, 200)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_renew_cookie_on_expiry(self, m_req, m_login):
+ # mock 200
+ m_response_ok = mock.MagicMock()
+ type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
+ m_response_ok.json.return_value = {'ok': True}
+
+ m_req.return_value = m_response_ok
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984', auto_renew=True)
+ iam.login()
+
+ # add an expired cookie to jar
+ iam.cookies.set_cookie(self._mock_cookie(expires_secs=-300))
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_req.call_count, 1)
+ self.assertEqual(resp.status_code, 200)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_renew_cookie_on_401_success(self, m_req, m_login):
+ # mock 200
+ m_response_ok = mock.MagicMock()
+ type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
+ m_response_ok.json.return_value = {'ok': True}
+ # mock 401
+ m_response_bad = mock.MagicMock()
+ type(m_response_bad).status_code = mock.PropertyMock(return_value=401)
+
+ m_req.side_effect = [m_response_bad, m_response_ok, m_response_ok]
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984', auto_renew=True)
+ iam.login()
+ self.assertEqual(m_login.call_count, 1)
+
+ # add a valid cookie to jar
+ iam.cookies.set_cookie(self._mock_cookie())
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 200)
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_req.call_count, 2)
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 200)
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_req.call_count, 3)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_renew_cookie_on_401_failure(self, m_req, m_login):
+ # mock 401
+ m_response_bad = mock.MagicMock()
+ type(m_response_bad).status_code = mock.PropertyMock(return_value=401)
+
+ m_req.return_value = m_response_bad
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984', auto_renew=True)
+ iam.login()
+ self.assertEqual(m_login.call_count, 1)
+
+ # add a valid cookie to jar
+ iam.cookies.set_cookie(self._mock_cookie())
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 401)
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_req.call_count, 2)
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 401)
+ self.assertEqual(m_login.call_count, 3)
+ self.assertEqual(m_req.call_count, 4)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_renew_cookie_disabled(self, m_req, m_login):
+ # mock 401
+ m_response_bad = mock.MagicMock()
+ type(m_response_bad).status_code = mock.PropertyMock(return_value=401)
+
+ m_req.return_value = m_response_bad
+
+ iam = IAMSession(MOCK_API_KEY, 'http://127.0.0.1:5984', auto_renew=False)
+ iam.login()
+ self.assertEqual(m_login.call_count, 1)
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 401)
+ self.assertEqual(m_login.call_count, 1) # no attempt to renew
+ self.assertEqual(m_req.call_count, 1)
+
+ resp = iam.request('GET', 'http://127.0.0.1:5984/mydb1')
+ self.assertEqual(resp.status_code, 401)
+ self.assertEqual(m_login.call_count, 1) # no attempt to renew
+ self.assertEqual(m_req.call_count, 2)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.ClientSession.request')
+ def test_iam_client_create(self, m_req, m_login):
+ # mock 200
+ m_response_ok = mock.MagicMock()
+ type(m_response_ok).status_code = mock.PropertyMock(return_value=200)
+ m_response_ok.json.return_value = ['animaldb']
+
+ m_req.return_value = m_response_ok
+
+ # create IAM client
+ client = Cloudant.iam('foo', MOCK_API_KEY)
+ client.connect()
+
+ # add a valid cookie to jar
+ client.r_session.cookies.set_cookie(self._mock_cookie())
+
+ dbs = client.all_dbs()
+
+ self.assertEqual(m_login.call_count, 1)
+ self.assertEqual(m_req.call_count, 1)
+ self.assertEqual(dbs, ['animaldb'])
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.IAMSession.set_credentials')
+ def test_iam_client_session_login(self, m_set, m_login):
+ # create IAM client
+ client = Cloudant.iam('foo', MOCK_API_KEY)
+ client.connect()
+
+ # add a valid cookie to jar
+ client.r_session.cookies.set_cookie(self._mock_cookie())
+
+ client.session_login()
+
+ m_set.assert_called_with(None, None)
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_set.call_count, 2)
+
+ @mock.patch('cloudant._common_util.IAMSession.login')
+ @mock.patch('cloudant._common_util.IAMSession.set_credentials')
+ def test_iam_client_session_login_with_new_credentials(self, m_set, m_login):
+ # create IAM client
+ client = Cloudant.iam('foo', MOCK_API_KEY)
+ client.connect()
+
+ # add a valid cookie to jar
+ client.r_session.cookies.set_cookie(self._mock_cookie())
+
+ client.session_login('bar', 'baz') # new creds
+
+ m_set.assert_called_with('bar', 'baz')
+ self.assertEqual(m_login.call_count, 2)
+ self.assertEqual(m_set.call_count, 2)
+
+
+if __name__ == '__main__':
+ unittest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 5
} | 2.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock==1.3.0",
"nose",
"sphinx",
"pylint",
"flaky",
"pytest"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
-e git+https://github.com/cloudant/python-cloudant.git@56795a119f7f8b884b001f9b2f740d67d550ea41#egg=cloudant
dill==0.3.4
docutils==0.18.1
flaky==3.8.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==1.3.0
nose==1.3.7
packaging==21.3
pbr==6.1.1
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
urllib3==1.26.20
wrapt==1.16.0
zipp==3.6.0
| name: python-cloudant
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- dill==0.3.4
- docutils==0.18.1
- flaky==3.8.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==1.3.0
- nose==1.3.7
- packaging==21.3
- pbr==6.1.1
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- urllib3==1.26.20
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/python-cloudant
| [
"tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_using_invalid_code",
"tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_with_proper_code_and_args",
"tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_without_args",
"tests/unit/client_tests.py::CloudantClientExceptionTests::test_raise_without_code",
"tests/unit/client_tests.py::ClientTests::test_constructor_with_url",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_client_create",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_client_session_login",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_client_session_login_with_new_credentials",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_first_request",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_get_access_token",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_get_session_info",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_login",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_logout",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_renew_cookie_disabled",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_renew_cookie_on_401_failure",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_renew_cookie_on_401_success",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_renew_cookie_on_expiry",
"tests/unit/iam_auth_tests.py::IAMAuthTests::test_iam_set_credentials"
]
| [
"tests/unit/client_tests.py::ClientTests::test_set_non_db_value_via_setitem",
"tests/unit/client_tests.py::ClientTests::test_all_dbs",
"tests/unit/client_tests.py::ClientTests::test_auto_connect",
"tests/unit/client_tests.py::ClientTests::test_auto_renew_enabled",
"tests/unit/client_tests.py::ClientTests::test_auto_renew_enabled_with_auto_connect",
"tests/unit/client_tests.py::ClientTests::test_basic_auth_str",
"tests/unit/client_tests.py::ClientTests::test_connect",
"tests/unit/client_tests.py::ClientTests::test_couchdb_context_helper",
"tests/unit/client_tests.py::ClientTests::test_create_db_via_setitem",
"tests/unit/client_tests.py::ClientTests::test_create_delete_database",
"tests/unit/client_tests.py::ClientTests::test_create_existing_database",
"tests/unit/client_tests.py::ClientTests::test_db_updates_feed_call",
"tests/unit/client_tests.py::ClientTests::test_delete_cached_db_object_via_delitem",
"tests/unit/client_tests.py::ClientTests::test_delete_non_existing_database",
"tests/unit/client_tests.py::ClientTests::test_delete_remote_db_via_delitem",
"tests/unit/client_tests.py::ClientTests::test_get_cached_db_object_via_get",
"tests/unit/client_tests.py::ClientTests::test_get_db_via_getitem",
"tests/unit/client_tests.py::ClientTests::test_get_non_existing_db_via_getitem",
"tests/unit/client_tests.py::ClientTests::test_get_remote_db_via_get",
"tests/unit/client_tests.py::ClientTests::test_keys",
"tests/unit/client_tests.py::ClientTests::test_local_set_db_value_via_setitem",
"tests/unit/client_tests.py::ClientTests::test_multiple_connect",
"tests/unit/client_tests.py::ClientTests::test_session",
"tests/unit/client_tests.py::ClientTests::test_session_cookie"
]
| []
| []
| Apache License 2.0 | 1,414 | [
"src/cloudant/_2to3.py",
"src/cloudant/_common_util.py",
"docs/getting_started.rst",
"src/cloudant/client.py",
"src/cloudant/__init__.py"
]
| [
"src/cloudant/_2to3.py",
"src/cloudant/_common_util.py",
"docs/getting_started.rst",
"src/cloudant/client.py",
"src/cloudant/__init__.py"
]
|
|
typesafehub__conductr-cli-514 | 5d05e5dec17319326326f78453465829b55fb446 | 2017-06-28 21:40:49 | 39719b38ec6fc0f598756700a8a815b56bd8bc59 | diff --git a/1 b/1
new file mode 100644
index 0000000..e69de29
diff --git a/conductr_cli/resolvers/bintray_resolver.py b/conductr_cli/resolvers/bintray_resolver.py
index 4501b87..b01da9e 100644
--- a/conductr_cli/resolvers/bintray_resolver.py
+++ b/conductr_cli/resolvers/bintray_resolver.py
@@ -15,7 +15,7 @@ BINTRAY_API_BASE_URL = 'https://api.bintray.com'
BINTRAY_DOWNLOAD_BASE_URL = 'https://dl.bintray.com'
BINTRAY_DOWNLOAD_REALM = 'Bintray'
BINTRAY_CREDENTIAL_FILE_PATH = '{}/.lightbend/commercial.credentials'.format(os.path.expanduser('~'))
-BINTRAY_PROPERTIES_RE = re.compile('^\s*(\S+)\s*=\s*([\S]+)\s*$')
+BINTRAY_PROPERTIES_RE = re.compile('^\s*(\S+)\s*=\s*((\S|\S+\s+\S+)+)\s*$')
BINTRAY_LIGHTBEND_ORG = 'lightbend'
BINTRAY_CONDUCTR_COMMERCIAL_REPO = 'commercial-releases'
BINTRAY_CONDUCTR_GENERIC_REPO = 'generic'
@@ -168,12 +168,16 @@ def load_bintray_credentials(raise_error=True, disable_instructions=False):
with open(BINTRAY_CREDENTIAL_FILE_PATH, 'r') as cred_file:
lines = [line.replace('\n', '') for line in cred_file.readlines()]
data = dict()
+ realm = BINTRAY_DOWNLOAD_REALM
for line in lines:
match = BINTRAY_PROPERTIES_RE.match(line)
if match is not None:
try:
key, value = match.group(1, 2)
- data[key] = value
+ if key == 'realm':
+ realm = value
+ elif realm == BINTRAY_DOWNLOAD_REALM:
+ data[key] = value
except IndexError:
pass
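The patch above tracks the current `realm = ...` section so that only the Bintray download realm's credentials are kept (the issue report below shows the two-section file that triggered the bug). A minimal, self-contained sketch of the same parsing idea; the constant and function names here are illustrative stand-ins, not the library's API:

```python
import re

# Illustrative stand-ins for the module-level constants touched in the patch above.
DOWNLOAD_REALM = 'Bintray'
PROPERTY_RE = re.compile(r'^\s*(\S+)\s*=\s*((\S|\S+\s+\S+)+)\s*$')


def parse_credentials(lines):
    """Keep only key/value pairs from the 'Bintray' download realm section."""
    data = {}
    realm = DOWNLOAD_REALM
    for line in lines:
        match = PROPERTY_RE.match(line)
        if match is None:
            continue
        key, value = match.group(1, 2)
        if key == 'realm':
            # Each 'realm = ...' line starts a new section of the file.
            realm = value
        elif realm == DOWNLOAD_REALM:
            data[key] = value
    return data


creds = parse_credentials([
    'realm = Bintray',
    'user = user1',
    'password = sec=ret',
    'realm = Bintray API Realm',
    'user = user2',
])
assert creds == {'user': 'user1', 'password': 'sec=ret'}
```

The final assertion mirrors the expectation added by `test_success_multiple_realms` in the test patch further down (`user1` / `sec=ret`).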
| Credentials file can only contain one section
My credentials file is defined as follows:
$ cat .lightbend/commercial.credentials
realm = Bintray
host = dl.bintray.com
user = [my username]
password = [my password]
realm = Bintray API Realm
host = api.bintray.com
user = [csp user]
password = [csp password]
I took the template from https://github.com/yuchaoran2011/csp-provisioning/blob/master/csp-environment/ansible/roles/install_conductr_cli/templates/commercial.credentials.j2
But when I ran:
`sandbox run 2.1.1 --feature visualization`,
I got the following error:
|------------------------------------------------|
| Starting ConductR |
|------------------------------------------------|
Bintray credentials loaded from /home/ubuntu/.lightbend/commercial.credentials
Error: Unable to fetch ConductR core artifact 2.1.1 from Bintray.
Error: Please specify a valid ConductR version.
Error: The latest version can be found on: https://www.lightbend.com/product/conductr/developer
Removing the last 4 lines of the credentials file solved the problem for me. | typesafehub/conductr-cli | diff --git a/conductr_cli/resolvers/test/test_bintray_resolver.py b/conductr_cli/resolvers/test/test_bintray_resolver.py
index 4790058..9cc48c3 100644
--- a/conductr_cli/resolvers/test/test_bintray_resolver.py
+++ b/conductr_cli/resolvers/test/test_bintray_resolver.py
@@ -1018,6 +1018,29 @@ class TestLoadBintrayCredentials(TestCase):
exists_mock.assert_called_with('{}/.lightbend/commercial.credentials'.format(os.path.expanduser('~')))
open_mock.assert_called_with('{}/.lightbend/commercial.credentials'.format(os.path.expanduser('~')), 'r')
+ def test_success_multiple_realms(self):
+ bintray_credential_file = (
+ 'realm = Bintray\n'
+ 'user = user1\n'
+ 'password = sec=ret\n'
+ 'realm = Bintray API Realm\n'
+ 'user = user2\n'
+ 'password = sec=ret2\n'
+ )
+
+ exists_mock = MagicMock(return_value=True)
+ open_mock = MagicMock(return_value=io.StringIO(bintray_credential_file))
+
+ with patch('os.path.exists', exists_mock), \
+ patch('builtins.open', open_mock):
+ realm, username, password = bintray_resolver.load_bintray_credentials()
+ self.assertEqual('Bintray', realm)
+ self.assertEqual('user1', username)
+ self.assertEqual('sec=ret', password)
+
+ exists_mock.assert_called_with('{}/.lightbend/commercial.credentials'.format(os.path.expanduser('~')))
+ open_mock.assert_called_with('{}/.lightbend/commercial.credentials'.format(os.path.expanduser('~')), 'r')
+
def test_credential_file_not_having_username_password(self):
bintray_credential_file = strip_margin(
"""|dummy = yes
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"tox",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==3.6.1
arrow==1.3.0
boto3==1.37.23
botocore==1.37.23
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
-e git+https://github.com/typesafehub/conductr-cli.git@5d05e5dec17319326326f78453465829b55fb446#egg=conductr_cli
distlib==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jmespath==1.0.1
jsonschema==2.6.0
packaging @ file:///croot/packaging_1734472117206/work
pager==3.3
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
prettytable==0.7.2
psutil==5.9.8
Pygments==2.19.1
pyhocon==0.3.35
PyJWT==1.4.2
pyparsing==3.2.3
pyproject-api==1.9.0
pyreadline==2.1
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
requests==2.32.3
requests-toolbelt==1.0.0
s3transfer==0.11.4
six==1.17.0
sseclient==0.0.14
toml==0.10.2
tomli==2.2.1
tox==4.25.0
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
urllib3==1.26.20
virtualenv==20.29.3
www-authenticate==0.9.2
| name: conductr-cli
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==3.6.1
- arrow==1.3.0
- boto3==1.37.23
- botocore==1.37.23
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- distlib==0.3.9
- filelock==3.18.0
- idna==3.10
- jmespath==1.0.1
- jsonschema==2.6.0
- pager==3.3
- platformdirs==4.3.7
- prettytable==0.7.2
- psutil==5.9.8
- pygments==2.19.1
- pyhocon==0.3.35
- pyjwt==1.4.2
- pyparsing==3.2.3
- pyproject-api==1.9.0
- pyreadline==2.1
- python-dateutil==2.9.0.post0
- requests==2.32.3
- requests-toolbelt==1.0.0
- s3transfer==0.11.4
- six==1.17.0
- sseclient==0.0.14
- toml==0.10.2
- tomli==2.2.1
- tox==4.25.0
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- urllib3==1.26.20
- virtualenv==20.29.3
- www-authenticate==0.9.2
prefix: /opt/conda/envs/conductr-cli
| [
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBintrayCredentials::test_success_multiple_realms"
]
| []
| [
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_bintray_version_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_bintray_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_connection_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_failure_http_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_failure_http_error_repo_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundle::test_failure_malformed_bundle_uri",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_bintray_version_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_bintray_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_failure_connection_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_failure_http_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_failure_http_error_repo_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleConfiguration::test_failure_malformed_bundle_uri",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_bintray_version_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_bintray_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_bundle",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_failure_connection_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_failure_http_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_failure_http_error_repo_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_failure_malformed_bundle_uri",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_file",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleFromCache::test_file_exists_but_no_conf_do_bintray",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_bintray_version_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_bintray_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_failure_connection_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_failure_http_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_failure_http_error_repo_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_failure_malformed_bundle_uri",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBundleConfigurationFromCache::test_file",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersion::test_failure_multiple_versions_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersion::test_failure_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersion::test_success",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatest::test_failure_latest_version_malformed",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatest::test_latest_version_from_attribute_names",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatest::test_latest_version_from_attribute_names_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatest::test_success",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatestCompatibilityVersion::test_no_version",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestBintrayResolveVersionLatestCompatibilityVersion::test_success",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleVersion::test_connection_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleVersion::test_http_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleVersion::test_malformed_bundle_uri_error",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleVersion::test_resolved_version_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestResolveBundleVersion::test_resolved_version_not_found",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestContinuousDeliveryUri::test_return_none_if_input_is_from_different_resolver",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestContinuousDeliveryUri::test_return_none_if_input_is_invalid",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestContinuousDeliveryUri::test_return_none_if_input_is_none",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestContinuousDeliveryUri::test_return_uri",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBintrayCredentials::test_credential_file_not_having_username_password",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBintrayCredentials::test_missing_credential_file",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBintrayCredentials::test_success",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestLoadBintrayCredentials::test_success_whitespace",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestGetJson::test_get_json",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestGetJson::test_get_json_missing_password",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestGetJson::test_get_json_missing_username",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestGetJson::test_get_json_no_credentials",
"conductr_cli/resolvers/test/test_bintray_resolver.py::TestSupportedSchemes::test_supported_schemes"
]
| []
| Apache License 2.0 | 1,415 | [
"conductr_cli/resolvers/bintray_resolver.py",
"1"
]
| [
"conductr_cli/resolvers/bintray_resolver.py",
"1"
]
|
|
palantir__python-language-server-69 | df7d399499b0a31fd03da72c84428c7f8957935b | 2017-06-29 10:03:16 | ac405e6ff8d886bc79d7e47b1104b10f2383f4bc | diff --git a/pyls/python_ls.py b/pyls/python_ls.py
index 0cc0e40..08d4fed 100644
--- a/pyls/python_ls.py
+++ b/pyls/python_ls.py
@@ -36,7 +36,7 @@ class PythonLanguageServer(LanguageServer):
'signatureHelpProvider': {
'triggerCharacters': ['(', ',']
},
- 'textDocumentSync': lsp.TextDocumentSyncKind.FULL
+ 'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL
}
def initialize(self, root_path, init_opts, _process_id):
@@ -111,11 +111,12 @@ class PythonLanguageServer(LanguageServer):
self.lint(textDocument['uri'])
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
- # Since we're using a FULL document sync, there is only one change containing the whole file
- # TODO: debounce, or should this be someone else's responsibility? Probably
- self.workspace.put_document(
- textDocument['uri'], contentChanges[0]['text'], version=textDocument.get('version')
- )
+ for change in contentChanges:
+ self.workspace.update_document(
+ textDocument['uri'],
+ change,
+ version=textDocument.get('version')
+ )
self.lint(textDocument['uri'])
def m_text_document__did_save(self, textDocument=None, **_kwargs):
diff --git a/pyls/workspace.py b/pyls/workspace.py
index f7a5b06..b8381a9 100644
--- a/pyls/workspace.py
+++ b/pyls/workspace.py
@@ -1,4 +1,5 @@
# Copyright 2017 Palantir Technologies, Inc.
+import io
import logging
import os
import re
@@ -44,6 +45,10 @@ class Workspace(object):
def rm_document(self, doc_uri):
self._docs.pop(doc_uri)
+ def update_document(self, doc_uri, change, version=None):
+ self._docs[doc_uri].apply_change(change)
+ self._docs[doc_uri].version = version
+
def apply_edit(self, edit):
# Note that lang_server.call currently doesn't return anything
return self._lang_server.call(self.M_APPLY_EDIT, {'edit': edit})
@@ -98,8 +103,45 @@ class Document(object):
return f.read()
return self._source
+ def apply_change(self, change):
+ """Apply a change to the document."""
+ text = change['text']
+ change_range = change.get('range')
+
+ if not change_range:
+ # The whole file has changed
+ self._source = text
+ return
+
+ start_line = change_range['start']['line']
+ start_col = change_range['start']['character']
+ end_line = change_range['end']['line']
+ end_col = change_range['end']['character']
+
+ new = io.StringIO()
+ # Iterate over the existing document until we hit the edit range,
+ # at which point we write the new text, then loop until we hit
+ # the end of the range and continue writing.
+ for i, line in enumerate(self.lines):
+ if i < start_line:
+ new.write(line)
+ continue
+
+ if i > end_line:
+ new.write(line)
+ continue
+
+ if i == start_line:
+ new.write(line[:start_col])
+ new.write(text)
+
+ if i == end_line:
+ new.write(line[end_col:])
+
+ self._source = new.getvalue()
+
def word_at_position(self, position):
- """ Get the word under the cursor returning the start and end positions """
+ """Get the word under the cursor returning the start and end positions."""
line = self.lines[position['line']]
i = position['character']
# Split word in two
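The `apply_change` method added above splices an incremental LSP `didChange` edit into the buffered document text instead of replacing the whole file. A rough standalone sketch of that splice, assuming the standard LSP change payload shape (this is a sketch, not the plugin's actual helper):

```python
def splice(source, change):
    """Apply one LSP didChange entry ({'range': ..., 'text': ...}) to a document string."""
    rng = change.get('range')
    if rng is None:
        # No range: the client sent the whole document (full sync).
        return change['text']

    start, end = rng['start'], rng['end']
    out = []
    for i, line in enumerate(source.splitlines(keepends=True)):
        if i < start['line'] or i > end['line']:
            out.append(line)                        # line untouched by the edit
            continue
        if i == start['line']:
            out.append(line[:start['character']])   # text before the edit range
            out.append(change['text'])              # replacement text
        if i == end['line']:
            out.append(line[end['character']:])     # text after the edit range
    return ''.join(out)


edit = {
    'text': 'goodbye',
    'range': {'start': {'line': 0, 'character': 3},
              'end': {'line': 0, 'character': 8}},
}
assert splice('itshelloworld', edit) == 'itsgoodbyeworld'
```

The example edit matches the `test_document_line_edit` case in the test patch below.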
| Support incremental TextDocumentSyncKind
Right now we get full text files each time they change. This is obviously bad.
I wonder if we could then propagate these changes to only re-lint the changed parts of the file? | palantir/python-language-server | diff --git a/test/test_workspace.py b/test/test_workspace.py
index f30afa7..fa262ae 100644
--- a/test/test_workspace.py
+++ b/test/test_workspace.py
@@ -51,3 +51,32 @@ def test_non_root_project(pyls):
pyls.workspace.put_document(test_uri, 'assert True')
test_doc = pyls.workspace.get_document(test_uri)
assert project_root in pyls.workspace.syspath_for_path(test_doc.path)
+
+
+def test_document_line_edit():
+ doc = workspace.Document('file:///uri', u'itshelloworld')
+ doc.apply_change({
+ 'text': u'goodbye',
+ 'range': {
+ 'start': {'line': 0, 'character': 3},
+ 'end': {'line': 0, 'character': 8}
+ }
+ })
+ assert doc.source == u'itsgoodbyeworld'
+
+
+def test_document_multiline_edit():
+ old = [
+ "def hello(a, b):\n",
+ " print a\n",
+ " print b\n"
+ ]
+ doc = workspace.Document('file:///uri', u''.join(old))
+ doc.apply_change({'text': u'print a, b', 'range': {
+ 'start': {'line': 1, 'character': 4},
+ 'end': {'line': 2, 'character': 11}
+ }})
+ assert doc.lines == [
+ "def hello(a, b):\n",
+ " print a, b\n"
+ ]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"coverage",
"pytest-cov"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
configparser==7.2.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
future==1.0.0
iniconfig==2.1.0
jedi==0.19.2
json-rpc==1.15.0
packaging==24.2
parso==0.8.4
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
-e git+https://github.com/palantir/python-language-server.git@df7d399499b0a31fd03da72c84428c7f8957935b#egg=python_language_server
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
versioneer==0.29
virtualenv==20.29.3
yapf==0.43.0
| name: python-language-server
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- configparser==7.2.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- future==1.0.0
- iniconfig==2.1.0
- jedi==0.19.2
- json-rpc==1.15.0
- packaging==24.2
- parso==0.8.4
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- versioneer==0.29
- virtualenv==20.29.3
- yapf==0.43.0
prefix: /opt/conda/envs/python-language-server
| [
"test/test_workspace.py::test_document_line_edit",
"test/test_workspace.py::test_document_multiline_edit"
]
| []
| [
"test/test_workspace.py::test_local",
"test/test_workspace.py::test_put_document",
"test/test_workspace.py::test_get_document",
"test/test_workspace.py::test_rm_document",
"test/test_workspace.py::test_bad_get_document",
"test/test_workspace.py::test_uri_like",
"test/test_workspace.py::test_non_root_project"
]
| []
| MIT License | 1,416 | [
"pyls/python_ls.py",
"pyls/workspace.py"
]
| [
"pyls/python_ls.py",
"pyls/workspace.py"
]
|
|
smarter-travel-media__warthog-20 | 0748fe97aff223b7f5cac008c856e3273b6a0343 | 2017-06-29 18:24:35 | 0748fe97aff223b7f5cac008c856e3273b6a0343 | diff --git a/warthog/client.py b/warthog/client.py
index ff3d0fd..ec94955 100644
--- a/warthog/client.py
+++ b/warthog/client.py
@@ -120,7 +120,7 @@ class CommandFactory(object):
self._transport_factory(), scheme_host, session_id, server)
-def _get_default_cmd_factory(verify, ssl_version):
+def _get_default_cmd_factory(verify, ssl_version, retries):
"""Get a :class:`CommandFactory` instance configured to use the provided TLS
version and cert verification policy
@@ -129,39 +129,17 @@ def _get_default_cmd_factory(verify, ssl_version):
:param int ssl_version: :mod:`ssl` module constant for specifying which SSL or
TLS version to use for connecting to the load balancer over HTTPS, ``None``
to use the default.
+ :param int retries: The maximum number of times to retry operations on transient
+ network errors.
:return: Default command factory for building new commands to interact
with the A10 load balancer.
:rtype: WarthogCommandFactory
"""
return CommandFactory(warthog.transport.get_transport_factory(
- verify=verify, ssl_version=ssl_version
+ verify=verify, ssl_version=ssl_version, retries=retries
))
[email protected]
-def session_context(scheme_host, username, password, commands):
- """Context manager that makes a request to start an authenticated session, yields the
- session ID, and then closes the session afterwards.
-
- :param basestring scheme_host: Scheme, host, and port combination of the load balancer.
- :param basestring username: Name of the user to authenticate with.
- :param basestring password: Password for the user to authenticate with.
- :param CommandFactory commands: Factory instance for creating new commands
- for starting and ending sessions with the load balancer.
- :return: The session ID of the newly established session.
- """
- session = None
- try:
- start_cmd = commands.get_session_start(scheme_host, username, password)
- session = start_cmd.send()
-
- yield session
- finally:
- if session is not None:
- end_cmd = commands.get_session_end(scheme_host, session)
- end_cmd.send()
-
-
class WarthogClient(object):
"""Client for interacting with an A10 load balancer to get the status
of nodes managed by it, enable them, and disable them.
@@ -172,27 +150,25 @@ class WarthogClient(object):
Removed .disabled_context() method.
"""
_logger = warthog.core.get_log()
- _default_wait_interval = 2.0
# pylint: disable=too-many-arguments
def __init__(self, scheme_host, username, password,
verify=None,
ssl_version=None,
- wait_interval=_default_wait_interval,
+ network_retries=None,
commands=None):
"""Set the load balancer scheme/host/port combination, username and password
to use for connecting and authenticating with the load balancer.
- Optionally, whether or not to verify certificates when using HTTPS may be
- toggled. This can enable you to use a self signed certificate for the load
- balancer while still using HTTPS.
+ Whether or not to verify certificates when using HTTPS may be toggled via the
+ ``verify`` parameter. This can enable you to use a self signed certificate for
+ the load balancer while still using HTTPS.
- Optionally, the version of SSL or TLS to use may be specified as a :mod:`ssl`
- module protocol constant.
+ The version of SSL or TLS to use may be specified as a :mod:`ssl` module protocol
+ constant via the ``ssl_version`` parameter.
- Optionally, the amount of time to wait between retries of various operations
- and the factory used for creating commands may be set. If the interval between
- retries is not supplied, the default is two seconds.
+ The maximum number of times to retry network operations on transient errors
+ can be specified via the ``network_retries`` parameter.
If the command factory is not supplied, a default instance will be used. The
command factory is responsible for creating new :class:`requests.Session` instances
@@ -207,6 +183,16 @@ class WarthogClient(object):
Added the optional ``ssl_version`` parameter to make use of alternate SSL
or TLS versions easier.
+ .. versionchanged:: 2.0.0
+ Added the optional ``network_retries`` parameter to make use of retry logic on
+ transient network errors. A non-zero number of retries is used by default if
+ this is not specified. Previously, no retries were attempted on transient network
+ errors.
+
+ .. versionchanged:: 2.0.0
+ Removed the optional ``wait_interval`` parameter. This is now passed directly
+ as an argument to :meth:`enable_node` or :meth:`disable_node` methods.
+
:param basestring scheme_host: Scheme, host, and port combination of the load balancer.
:param basestring username: Name of the user to authenticate with.
:param basestring password: Password for the user to authenticate with.
@@ -216,23 +202,36 @@ class WarthogClient(object):
:param int|None ssl_version: :mod:`ssl` module constant for specifying which version of
SSL or TLS to use when connecting to the load balancer over HTTPS, ``None`` to use
the library default. The default is to use TLSv1.2.
- :param float wait_interval: How long (in seconds) to wait between each retry of
- various operations (waiting for nodes to transition, waiting for connections
- to close, etc.).
+ :param int|None network_retries: Maximum number of times to retry network operations on
+ transient network errors. Default is to retry network operations a non-zero number
+ of times.
:param CommandFactory commands: Factory instance for creating new commands for
starting and ending sessions with the load balancer.
"""
self._scheme_host = scheme_host
self._username = username
self._password = password
- self._interval = wait_interval
self._commands = commands if commands is not None else \
- _get_default_cmd_factory(verify, ssl_version)
+ _get_default_cmd_factory(verify, ssl_version, network_retries)
+ @contextlib.contextmanager
def _session_context(self):
- """Get a new context manager that starts and ends a session with the load balancer."""
+ """Context manager that makes a request to start an authenticated session, yields the
+ session ID, and then closes the session afterwards.
+
+ :return: The session ID of the newly established session.
+ """
self._logger.debug('Creating new session context for %s', self._scheme_host)
- return session_context(self._scheme_host, self._username, self._password, self._commands)
+ session = None
+ try:
+ start_cmd = self._commands.get_session_start(self._scheme_host, self._username, self._password)
+ session = start_cmd.send()
+
+ yield session
+ finally:
+ if session is not None:
+ end_cmd = self._commands.get_session_end(self._scheme_host, session)
+ end_cmd.send()
def get_status(self, server):
"""Get the current status of the given server, at the node level.
@@ -248,12 +247,11 @@ class WarthogClient(object):
operation.
:raises warthog.exceptions.WarthogNoSuchNodeError: If the load balancer does
not recognize the given hostname.
- :raises warthog.exceptions.WarthogNodeStatusError: If there are any other
+ :raises warthog.exceptions.WarthogApiError: If there are any other
problems getting the status of the given server.
"""
with self._session_context() as session:
- cmd = self._commands.get_server_status(
- self._scheme_host, session, server)
+ cmd = self._commands.get_server_status(self._scheme_host, session, server)
return cmd.send()
def get_connections(self, server):
@@ -271,28 +269,31 @@ class WarthogClient(object):
operation.
:raises warthog.exceptions.WarthogNoSuchNodeError: If the load balancer does
not recognize the given hostname.
- :raises warthog.exceptions.WarthogNodeStatusError: If there are any other
+ :raises warthog.exceptions.WarthogApiError: If there are any other
problems getting the active connections for the given server.
.. versionadded:: 0.4.0
"""
with self._session_context() as session:
- cmd = self._commands.get_active_connections(
- self._scheme_host, session, server)
+ cmd = self._commands.get_active_connections(self._scheme_host, session, server)
return cmd.send()
- def disable_server(self, server, max_retries=5):
+ def disable_server(self, server, max_retries=5, wait_interval=2.0):
"""Disable a server at the node level, optionally retrying when there are transient
errors and waiting for the number of active connections to the server to reach zero.
- If ``max_retries`` is zero, no attempt will be made to retry on transient errors
- or to wait until there are no active connections to the server, the method will
- try a single time to disable the server and then return immediately.
+ If ``max_retries`` is zero, no attempt will be made to wait until there are no active
+ connections to the server, the method will try a single time to disable the server and
+ then return immediately.
+
+ .. versionchanged:: 2.0.0
+ Added the optional ``wait_interval`` parameter.
:param basestring server: Hostname of the server to disable
- :param int max_retries: Max number of times to sleep and retry when encountering
- some sort of transient error when disabling the server and while waiting for
+ :param int max_retries: Max number of times to sleep and retry while waiting for
the number of active connections to a server to reach zero.
+ :param float wait_interval: How long (in seconds) to wait between each check to
+ see if the number of active connections to a server has reached zero.
:return: True if the server was disabled, false otherwise.
:rtype: bool
:raises warthog.exceptions.WarthogAuthFailureError: If authentication with
@@ -300,26 +301,23 @@ class WarthogClient(object):
operation.
:raises warthog.exceptions.WarthogNoSuchNodeError: If the load balancer does
not recognize the given hostname.
- :raises warthog.exceptions.WarthogNodeDisableError: If there are any other
+ :raises warthog.exceptions.WarthogApiError: If there are any other
problems disabling the given server.
"""
with self._session_context() as session:
disable = self._commands.get_disable_server(self._scheme_host, session, server)
- self._try_repeatedly(disable.send, max_retries)
+ disable.send()
active = self._commands.get_active_connections(self._scheme_host, session, server)
- self._wait_for_connections(active.send, max_retries)
+ self._wait_for_connections(active.send, max_retries, wait_interval)
- status = self._commands.get_server_status(
- self._scheme_host, session, server)
+ status = self._commands.get_server_status(self._scheme_host, session, server)
return warthog.core.STATUS_DISABLED == status.send()
- # NOTE: there's a fair amount of duplicate code between this method and _wait_for_status
- # and we could consolidate them to one method that just accepts a function and waits for
- # it to return true and then break. But, this way we have more useful debug information
- # logged at the expense of duplicate code.
- # pylint: disable=missing-docstring
- def _wait_for_connections(self, conn_method, max_retries):
+ def _wait_for_connections(self, conn_method, max_retries, interval):
+ """Repeatedly execute a command to get the number of active connections until
+ the number of active connections drops to zero or we run out of retries.
+ """
retries = 0
while retries < max_retries:
@@ -328,21 +326,26 @@ class WarthogClient(object):
break
self._logger.debug(
- "Connections still active: %s, sleeping for %s seconds...", conns, self._interval)
- time.sleep(self._interval)
+ "Connections still active: %s, sleeping for %s seconds...", conns, interval)
+ time.sleep(interval)
retries += 1
- def enable_server(self, server, max_retries=5):
+ def enable_server(self, server, max_retries=5, wait_interval=2.0):
"""Enable a server at the node level, optionally retrying when there are transient
errors and waiting for the server to enter the expected, enabled state.
- If ``max_retries`` is zero, no attempt will be made to retry on transient errors
- or to wait until the server enters the expected, enabled state, the method will
- try a single time to enable the server then return immediately.
+ If ``max_retries`` is zero, no attempt will be made to wait until the server enters
+ the expected, enabled state, the method will try a single time to enable the server
+ then return immediately.
+
+ .. versionchanged:: 2.0.0
+ Added the optional ``wait_interval`` parameter.
:param basestring server: Hostname of the server to enable
- :param int max_retries: Max number of times to sleep and retry when encountering
- some transient error while trying to enable the server
+ :param int max_retries: Max number of times to sleep and retry while waiting for
+ the server to enter the "enabled" state.
+ :param float wait_interval: How long (in seconds) to wait between each check to
+ see if the server has entered the "enabled" state.
:return: True if the server was enabled, false otherwise
:rtype: bool
:raises warthog.exceptions.WarthogAuthFailureError: If authentication with
@@ -350,20 +353,22 @@ class WarthogClient(object):
operation.
:raises warthog.exceptions.WarthogNoSuchNodeError: If the load balancer does
not recognize the given hostname.
- :raises warthog.exceptions.WarthogNodeEnableError: If there are any other
+ :raises warthog.exceptions.WarthogApiError: If there are any other
problems enabling the given server.
"""
with self._session_context() as session:
enable = self._commands.get_enable_server(self._scheme_host, session, server)
- self._try_repeatedly(enable.send, max_retries)
+ enable.send()
status = self._commands.get_server_status(self._scheme_host, session, server)
- self._wait_for_enable(status.send, max_retries)
+ self._wait_for_enable(status.send, max_retries, wait_interval)
return warthog.core.STATUS_ENABLED == status.send()
- # pylint: disable=missing-docstring
- def _wait_for_enable(self, status_method, max_retries):
+ def _wait_for_enable(self, status_method, max_retries, interval):
+ """Repeatedly execute a command to get the status of a node until the node
+ becomes enabled or we run out of retries.
+ """
retries = 0
while retries < max_retries:
@@ -373,28 +378,6 @@ class WarthogClient(object):
self._logger.debug(
"Server is not yet enabled (%s), sleeping for %s seconds...",
- status, self._interval)
- time.sleep(self._interval)
+ status, interval)
+ time.sleep(interval)
retries += 1
-
- def _try_repeatedly(self, method, max_retries):
- """Execute a method, retrying if it fails due to a transient error
- up to a given number of times, with the instance-wide interval in
- between each try.
- """
- retries = 0
-
- while True:
- try:
- return method()
- except warthog.exceptions.WarthogApiError as e:
- if e.api_code not in warthog.core.TRANSIENT_ERRORS or retries >= max_retries:
- raise
- self._logger.debug(
- "Encountered transient error %s - %s, retrying... ", e.api_code, e.api_msg)
- time.sleep(self._interval)
- retries += 1
-
-
-# NOTE: This alias is only for transitioning to the v3 API
-CommandFactory3 = CommandFactory
diff --git a/warthog/core.py b/warthog/core.py
index 147d1f1..912f48b 100644
--- a/warthog/core.py
+++ b/warthog/core.py
@@ -32,8 +32,6 @@ ERROR_CODE_NO_SUCH_SERVER = 1023460352
ERROR_CODE_BAD_PERMISSION = 419545856
-TRANSIENT_ERRORS = frozenset([])
-
_PATH_AUTH = '/axapi/v3/auth'
_PATH_LOGOFF = '/axapi/v3/logoff'
diff --git a/warthog/transport.py b/warthog/transport.py
index 776d2c8..84134fb 100644
--- a/warthog/transport.py
+++ b/warthog/transport.py
@@ -21,8 +21,7 @@ import requests
from requests.adapters import (
HTTPAdapter,
DEFAULT_POOLBLOCK,
- DEFAULT_POOLSIZE,
- DEFAULT_RETRIES)
+ DEFAULT_POOLSIZE)
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.poolmanager import PoolManager
@@ -36,28 +35,34 @@ DEFAULT_SSL_VERSION = warthog.ssl.PROTOCOL_TLSv1_2
# Default to verifying SSL/TLS certs because "safe by default" is a good idea.
DEFAULT_CERT_VERIFY = True
+# Default number of times to retry on transient network errors like connection
+# timeouts or DNS timeouts.
+DEFAULT_RETRIES = 5
-def get_transport_factory(verify=None, ssl_version=None):
+
+def get_transport_factory(verify=None, ssl_version=None, retries=None):
"""Get a new callable that returns :class:`requests.Session` instances that
have been configured according to the given parameters.
:class:`requests.Session` instances are then used for interacting with the API
of the load balancer over HTTP or HTTPS.
- It is typically not required for user code to call this function directly unless
- you have special requirements such as needing to bypass HTTPS certificate validation
- because you use a self signed certificate.
-
.. versionchanged:: 0.10.0
Using the requests/urllib3 default is no longer an option. Passing a ``None`` value
for ``ssl_version`` will result in using the Warthog default (TLS v1).
+ .. versionchanged:: 2.0.0
+ Added the ``retries`` parameter and default it to a number greater than zero.
+
:param bool|None verify: Should SSL certificates by verified when connecting
over HTTPS? Default is ``True``. If you have chosen not to verify certificates
warnings about this emitted by the requests library will be suppressed.
:param int|None ssl_version: Explicit version of SSL to use for HTTPS connections
to an A10 load balancer. The version is a constant as specified by the
:mod:`ssl` module. The default is TLSv1.
+ :param int|None retries: The maximum number of times to retry operations on transient
+ network errors. Note this only applies to cases where we haven't yet sent any
+ data to the server (e.g. connection errors, DNS errors, etc.)
:return: A callable to return new configured session instances for making HTTP(S)
requests
:rtype: callable
@@ -67,15 +72,23 @@ def get_transport_factory(verify=None, ssl_version=None):
# just pass `None` and we'll pick the default here.
verify = verify if verify is not None else DEFAULT_CERT_VERIFY
ssl_version = ssl_version if ssl_version is not None else DEFAULT_SSL_VERSION
+ retries = retries if retries is not None else DEFAULT_RETRIES
# pylint: disable=missing-docstring
def factory():
transport = requests.Session()
- transport.mount('https://', VersionedSSLAdapter(ssl_version))
+ transport.mount('https://', VersionedSSLAdapter(ssl_version, max_retries=retries))
if not verify:
transport.verify = False
+ transport.mount('http://', HTTPAdapter(
+ max_retries=retries,
+ pool_connections=DEFAULT_POOLSIZE,
+ pool_maxsize=DEFAULT_POOLSIZE,
+ pool_block=DEFAULT_POOLBLOCK
+ ))
+
return transport
# Make sure that we suppress warnings about invalid certs since the user
@@ -100,7 +113,8 @@ class VersionedSSLAdapter(HTTPAdapter):
pool_connections=pool_connections,
pool_maxsize=pool_maxsize,
max_retries=max_retries,
- pool_block=pool_block)
+ pool_block=pool_block
+ )
def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
# pylint: disable=attribute-defined-outside-init
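The patch above mounts `requests` adapters with a non-zero `max_retries` so that transient failures such as `ConnectionError` and `ConnectTimeout` (the cases named in the issue below) are retried before any data reaches the server. A stripped-down sketch of that configuration, omitting the TLS-version adapter; the function name and URL are illustrative:

```python
import requests
from requests.adapters import HTTPAdapter


def build_session(retries=5):
    """Build a requests Session whose adapters retry transient connection errors.

    An integer max_retries only retries failed DNS lookups, socket connections
    and connection timeouts (never requests where data already reached the
    server), which matches the docstring added in the patch above.
    """
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=retries)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session


session = build_session()
# e.g. session.get('https://lb.example.com/axapi/v3/...')  # hypothetical URL
```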
| Retry commands on requests ConnectionError and ConnectTimeout
These are pretty standard transient network failures that can cause issues during deploys and are safe to retry a few times. This issue came up when failing over between members of a redundant LB pair. (A minimal sketch of the retry setup follows this record.) | smarter-travel-media/warthog | diff --git a/test/test_client.py b/test/test_client.py
index 342f9a5..2c80aee 100644
--- a/test/test_client.py
+++ b/test/test_client.py
@@ -1,9 +1,8 @@
# -*- coding: utf-8 -*-
-import logging
-
-import pytest
import mock
+import pytest
+
import warthog.client
import warthog.core
import warthog.exceptions
@@ -59,15 +58,13 @@ def disable_cmd():
return mock.Mock(spec=warthog.core.NodeDisableCommand)
[email protected]
-def client():
- return mock.Mock(spec=warthog.client.WarthogClient)
-
-
def test_session_context_enter_yields_session(commands, start_cmd):
start_cmd.send.return_value = '1234'
- context = warthog.client.session_context(SCHEME_HOST, 'user', 'password', commands)
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
+
+ context = client._session_context()
session = context.__enter__()
assert '1234' == session, 'Did not get expected session ID'
@@ -78,7 +75,10 @@ def test_session_context_enter_yields_session(commands, start_cmd):
def test_session_context_exit_closes_previous_session(commands, start_cmd, end_cmd):
start_cmd.send.return_value = '1234'
- context = warthog.client.session_context(SCHEME_HOST, 'user', 'password', commands)
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
+
+ context = client._session_context()
context.__enter__()
context.__exit__(None, None, None)
@@ -88,92 +88,100 @@ def test_session_context_exit_closes_previous_session(commands, start_cmd, end_c
def test_session_context_exception_in_context_propagated(commands, start_cmd):
start_cmd.send.return_value = '1234'
- context = warthog.client.session_context(SCHEME_HOST, 'user', 'password', commands)
+
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
+
+ context = client._session_context()
with pytest.raises(RuntimeError):
with context:
raise RuntimeError("AHH!")
-class TestWarthogClient(object):
- def test_get_status(self, commands, start_cmd, end_cmd, status_cmd):
- start_cmd.send.return_value = '1234'
- status_cmd.send.return_value = 'down'
-
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
+def test_get_status(commands, start_cmd, end_cmd, status_cmd):
+ start_cmd.send.return_value = '1234'
+ status_cmd.send.return_value = 'down'
- status = client.get_status('app1.example.com')
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
- assert 'down' == status, 'Did not get expected status'
- assert end_cmd.send.called, 'Session end .send() did not get called'
+ status = client.get_status('app1.example.com')
- def test_get_connections(self, commands, start_cmd, end_cmd, conn_cmd):
- start_cmd.send.return_value = '1234'
- conn_cmd.send.return_value = 42
+ assert 'down' == status, 'Did not get expected status'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
- connections = client.get_connections('app1.example.com')
+def test_get_connections(commands, start_cmd, end_cmd, conn_cmd):
+ start_cmd.send.return_value = '1234'
+ conn_cmd.send.return_value = 42
- assert 42 == connections, 'Did not get expected active connections'
- assert end_cmd.send.called, 'Session end .send() did not get called'
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
- def test_disable_server_no_active_connections(self, commands, start_cmd, end_cmd,
- status_cmd, conn_cmd, disable_cmd):
- start_cmd.send.return_value = '1234'
- disable_cmd.send.return_value = True
- conn_cmd.send.return_value = 0
- status_cmd.send.return_value = 'disabled'
+ connections = client.get_connections('app1.example.com')
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
+ assert 42 == connections, 'Did not get expected active connections'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
- disabled = client.disable_server('app1.example.com')
- assert disabled, 'Server did not end up disabled'
- assert end_cmd.send.called, 'Session end .send() did not get called'
+def test_disable_server_no_active_connections(commands, start_cmd, end_cmd,
+ status_cmd, conn_cmd, disable_cmd):
+ start_cmd.send.return_value = '1234'
+ disable_cmd.send.return_value = True
+ conn_cmd.send.return_value = 0
+ status_cmd.send.return_value = 'disabled'
- def test_disable_server_with_active_connections(self, commands, start_cmd, end_cmd,
- status_cmd, conn_cmd, disable_cmd):
- start_cmd.send.return_value = '1234'
- disable_cmd.send.return_value = True
- conn_cmd.send.return_value = [42, 3, 0]
- status_cmd.send.return_value = 'disabled'
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
+ disabled = client.disable_server('app1.example.com')
- disabled = client.disable_server('app1.example.com')
+ assert disabled, 'Server did not end up disabled'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
- assert disabled, 'Server did not end up disabled'
- assert end_cmd.send.called, 'Session end .send() did not get called'
- def test_disable_server_never_gets_disabled(self, commands, start_cmd, end_cmd,
+def test_disable_server_with_active_connections(commands, start_cmd, end_cmd,
status_cmd, conn_cmd, disable_cmd):
- start_cmd.send.return_value = '1234'
- disable_cmd.send.return_value = True
- conn_cmd.send.return_value = 42
- status_cmd.send.return_value = 'enabled'
+ start_cmd.send.return_value = '1234'
+ disable_cmd.send.return_value = True
+ conn_cmd.send.return_value = [42, 3, 0]
+ status_cmd.send.return_value = 'disabled'
+
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
+
+ disabled = client.disable_server('app1.example.com', wait_interval=0.1)
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
+ assert disabled, 'Server did not end up disabled'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
- disabled = client.disable_server('app1.example.com')
- assert not disabled, 'Server ended up disabled'
- assert end_cmd.send.called, 'Session end .send() did not get called'
+def test_disable_server_never_gets_disabled(commands, start_cmd, end_cmd,
+ status_cmd, conn_cmd, disable_cmd):
+ start_cmd.send.return_value = '1234'
+ disable_cmd.send.return_value = True
+ conn_cmd.send.return_value = 42
+ status_cmd.send.return_value = 'enabled'
+
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
+
+ disabled = client.disable_server('app1.example.com', wait_interval=0.1)
- def test_enable_server(self, commands, start_cmd, end_cmd, status_cmd, enable_cmd):
- start_cmd.send.return_value = '1234'
- enable_cmd.send.return_value = True
- status_cmd.send.side_effect = ['down', 'down', 'enabled', 'enabled']
+ assert not disabled, 'Server ended up disabled'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
+
+
+def test_enable_server(commands, start_cmd, end_cmd, status_cmd, enable_cmd):
+ start_cmd.send.return_value = '1234'
+ enable_cmd.send.return_value = True
+ status_cmd.send.side_effect = ['down', 'down', 'enabled', 'enabled']
- client = warthog.client.WarthogClient(
- SCHEME_HOST, 'user', 'password', wait_interval=0.1, commands=commands)
+ client = warthog.client.WarthogClient(
+ SCHEME_HOST, 'user', 'password', commands=commands)
- enabled = client.enable_server('app1.example.com')
+ enabled = client.enable_server('app1.example.com', wait_interval=0.1)
- assert enabled, 'Server did not end up enabled'
- assert end_cmd.send.called, 'Session end .send() did not get called'
+ assert enabled, 'Server did not end up enabled'
+ assert end_cmd.send.called, 'Session end .send() did not get called'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.999 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==6.7
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.11.1
tomli==2.2.1
typing_extensions==4.13.0
-e git+https://github.com/smarter-travel-media/warthog.git@0748fe97aff223b7f5cac008c856e3273b6a0343#egg=warthog
| name: warthog
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==6.7
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.11.1
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/warthog
| [
"test/test_client.py::test_disable_server_with_active_connections",
"test/test_client.py::test_disable_server_never_gets_disabled",
"test/test_client.py::test_enable_server"
]
| []
| [
"test/test_client.py::test_session_context_enter_yields_session",
"test/test_client.py::test_session_context_exit_closes_previous_session",
"test/test_client.py::test_session_context_exception_in_context_propagated",
"test/test_client.py::test_get_status",
"test/test_client.py::test_get_connections",
"test/test_client.py::test_disable_server_no_active_connections"
]
| []
| MIT License | 1,417 | [
"warthog/core.py",
"warthog/transport.py",
"warthog/client.py"
]
| [
"warthog/core.py",
"warthog/transport.py",
"warthog/client.py"
]
|
|
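The warthog record above describes retrying transient connection failures before any data reaches the server. Below is a minimal sketch of that idea using only the public `requests` API; the retry count, helper name, and URL are illustrative assumptions rather than warthog's actual transport code.

```python
import requests
from requests.adapters import HTTPAdapter

DEFAULT_RETRIES = 5  # assumed value, mirroring the constant added in the patch


def build_session(retries=DEFAULT_RETRIES):
    """Return a Session whose HTTP and HTTPS adapters retry failed connections."""
    session = requests.Session()
    # Passing an int asks urllib3 to build a Retry policy that re-attempts
    # connection-level failures (DNS errors, connect timeouts) a few times.
    adapter = HTTPAdapter(max_retries=retries)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    return session


if __name__ == '__main__':
    session = build_session()
    # Hypothetical endpoint, only here to show the call site.
    response = session.get('https://example.com/health')
    print(response.status_code)
```

As the patch's docstring notes, these retries only cover cases where no data has been sent to the server yet, which is why a small fixed retry budget is a safe default.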
terryyin__lizard-191 | 48de756b52b92705f2127353b54d5a4ddac71187 | 2017-06-29 21:53:30 | 48de756b52b92705f2127353b54d5a4ddac71187 | diff --git a/lizard_languages/clike.py b/lizard_languages/clike.py
index 33e1c3d..2c1af01 100644
--- a/lizard_languages/clike.py
+++ b/lizard_languages/clike.py
@@ -155,7 +155,7 @@ class CLikeNestingStackStates(CodeStateMachine):
if token == "template":
self._state = self._template_declaration
- elif token in ("struct", "class", "namespace"):
+ elif token in ("struct", "class", "namespace", "union"):
self._state = self._read_namespace
elif token == "{":
| Lizard not handling functions within unions correctly
For the following code:
```c++
namespace foo{
void myFunction() { }
union bar{
void mySecondFunction() { }
};
class dog{
void bark() { }
};
};
```
Lizard generates the following output:

This shows that the long_name generated by lizard includes the enclosing namespace or class of a function, but does not include an enclosing union. This inconsistency can cause issues for code analysis tools. (An illustrative verification sketch follows this record.) | terryyin/lizard | diff --git a/test/test_languages/testCAndCPP.py b/test/test_languages/testCAndCPP.py
index cd5569b..41a1b13 100644
--- a/test/test_languages/testCAndCPP.py
+++ b/test/test_languages/testCAndCPP.py
@@ -461,6 +461,11 @@ class Test_c_cpp_lizard(unittest.TestCase):
self.assertEqual(1, len(result))
self.assertEqual("A::foo", result[0].name)
+ def test_union_as_qualifier(self):
+ """Union as namespace for functions."""
+ result = get_cpp_function_list("union A { void foo() {} };")
+ self.assertEqual(1, len(result))
+ self.assertEqual("A::foo", result[0].name)
class Test_cpp11_Attributes(unittest.TestCase):
"""C++11 extendable attributes can appear pretty much anywhere."""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt",
"dev_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
dill==0.3.9
exceptiongroup==1.2.2
iniconfig==2.1.0
isort==6.0.1
-e git+https://github.com/terryyin/lizard.git@48de756b52b92705f2127353b54d5a4ddac71187#egg=lizard
mccabe==0.7.0
mock==5.2.0
nose==1.3.7
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pylint==3.3.6
pytest==8.3.5
tomli==2.2.1
tomlkit==0.13.2
typing_extensions==4.13.0
| name: lizard
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- dill==0.3.9
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pylint==3.3.6
- pytest==8.3.5
- tomli==2.2.1
- tomlkit==0.13.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/lizard
| [
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_union_as_qualifier"
]
| [
"test/test_languages/testCAndCPP.py::Test_Big::test_typedef"
]
| [
"test/test_languages/testCAndCPP.py::Test_C_Token_extension::test_connecting_marcro",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_1",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_braket_that_is_not_a_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_class_with_inheritance",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_complicated_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initialization_list_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_constructor_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_destructor_implementation",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_double_slash_within_string",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_empty",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_followed_with_one_word_is_ok",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_dec_with_throw",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_declaration_is_not_counted",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_name_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_that_returns_function_pointers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_1_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_content",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_no_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_function_with_strang_param2",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_global_var_constructor",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_inline_operator",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_less_then_is_not_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_namespace_alias",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_class_middle",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_nested_unnamed_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_no_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_initializer_list",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_non_function_uniform_initialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_old_style_c_function_has_semicolon",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_const",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_noexcept",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_function_with_throw",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_one_macro_in_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_only_word_can_be_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_shift",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_overloading_with_namespace",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_operator_with_complicated_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_parentheses_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_pre_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_ref_qualifiers",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_param",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_struct_in_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_stupid_macro_before_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_part_of_function_name",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_full_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_class_partial_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_function_specialization",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_pointer",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_template_with_reference_as_reference",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_trailing_return_type",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_two_simplest_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_typedef_is_not_old_style_c_function",
"test/test_languages/testCAndCPP.py::Test_c_cpp_lizard::test_underscore",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_class",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_control_structures",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_parameters",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_function_return_type",
"test/test_languages/testCAndCPP.py::Test_cpp11_Attributes::test_namespace",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_content_macro_should_be_ignored",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessor_is_not_function",
"test/test_languages/testCAndCPP.py::Test_Preprocessing::test_preprocessors_should_be_ignored_outside_function_implementation",
"test/test_languages/testCAndCPP.py::Test_Big::test_trouble",
"test/test_languages/testCAndCPP.py::Test_Dialects::test_cuda_kernel_launch"
]
| []
| MIT License | 1,418 | [
"lizard_languages/clike.py"
]
| [
"lizard_languages/clike.py"
]
|
|
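The lizard record above turns on whether a `union` is treated as a qualifying scope. The small verification sketch below uses lizard's Python entry point; the `analyze_source_code` call mirrors the helper used in the project's own tests, so treat the exact API shape as an assumption.

```python
import lizard

# C++ snippet from the issue: a member function declared inside a union.
SOURCE = """
union bar {
    void mySecondFunction() { }
};
"""

# analyze_source_code(filename, code) drives the analyzer directly; the
# filename is only used to pick the language reader.
info = lizard.analyze_file.analyze_source_code("example.cpp", SOURCE)

for func in info.function_list:
    # With the fix applied, the union should appear as a qualifier,
    # e.g. "bar::mySecondFunction"; without it, the qualifier is missing.
    print(func.name, func.long_name)
```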
melexis__warnings-plugin-28 | e45c72adea46a8595cc426368e38090a7553f40c | 2017-06-30 08:18:03 | 0c7e730a491d32ad90f258439715fb6507be37f2 | diff --git a/README.rst b/README.rst
index 7b37e9c..6c66f05 100644
--- a/README.rst
+++ b/README.rst
@@ -10,22 +10,10 @@
:target: https://codecov.io/gh/melexis/warnings-plugin
:alt: Code Coverage
-.. image:: https://codeclimate.com/github/melexis/warnings-plugin/badges/gpa.svg
- :target: https://codeclimate.com/github/melexis/warnings-plugin
- :alt: Code Climate Status
-
-.. image:: https://codeclimate.com/github/melexis/warnings-plugin/badges/issue_count.svg
- :target: https://codeclimate.com/github/melexis/warnings-plugin
- :alt: Issue Count
-
.. image:: https://requires.io/github/melexis/warnings-plugin/requirements.svg?branch=master
:target: https://requires.io/github/melexis/warnings-plugin/requirements/?branch=master
:alt: Requirements Status
-.. image:: https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat
- :target: https://github.com/melexis/warnings-plugin/issues
- :alt: Contributions welcome
-
============================
Command line warnings-plugin
@@ -99,6 +87,11 @@ that case command will look like:
Help prints all currently supported commands and their usages.
+The command returns (shell $? variable):
+
+- value 0 when the number of counted warnings is within the supplied minimum and maximum limits: ok,
+- number of counted warnings (positive) when the counter number is not within those limit.
+
----------------------------
Parse for Sphinx warnings
----------------------------
diff --git a/docs/conf.py b/docs/conf.py
index 02a5c31..1f6c773 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -62,8 +62,8 @@ napoleon_use_param = False
if 'nt' in os.name:
plantuml_path = subprocess.check_output(["where", "/F", "plantuml.jar"])
if not plantuml_path :
- print("Can't find 'plantuml.jar' file.")
- print("You need to add path to 'plantuml.jar' file to your PATH variable.")
+ print "Can't find 'plantuml.jar' file."
+ print "You need to add path to 'plantuml.jar' file to your PATH variable."
sys.exit(os.strerror(errno.EPERM))
plantuml = plantuml_path.decode("utf-8")
plantuml = plantuml.rstrip('\n\r')
@@ -73,8 +73,8 @@ if 'nt' in os.name:
else:
plantuml_path = subprocess.check_output(["whereis", "-u", "plantuml"])
if not plantuml_path :
- print("Can't find 'plantuml.jar' file.")
- print("You need to add path to 'plantuml.jar' file to your PATH variable.")
+ print "Can't find 'plantuml.jar' file."
+ print "You need to add path to 'plantuml.jar' file to your PATH variable."
sys.exit(os.strerror(errno.EPERM))
diff --git a/src/mlx/warnings.py b/src/mlx/warnings.py
index 70a8474..ccbb45a 100644
--- a/src/mlx/warnings.py
+++ b/src/mlx/warnings.py
@@ -94,14 +94,14 @@ class WarningsChecker(object):
''' Function for checking whether the warning count is within the configured limits
Returns:
- int: 0 if the amount of warnings is within limits. 1 otherwise
+ int: 0 if the amount of warnings is within limits. the count of warnings otherwise
'''
if self.count > self.warn_max:
print("Number of warnings ({count}) is higher than the maximum limit ({max}). Returning error code 1.".format(count=self.count, max=self.warn_max))
- return 1
+ return self.count
elif self.count < self.warn_min:
print("Number of warnings ({count}) is lower than the minimum limit ({min}). Returning error code 1.".format(count=self.count, min=self.warn_min))
- return 1
+ return self.count
else:
print("Number of warnings ({count}) is between limits {min} and {max}. Well done.".format(count=self.count, min=self.warn_min, max=self.warn_max))
return 0
| Return code of main() could/should be number of warnings
When the warning count is not within the configured min/max limits, the return code of main() could be the number of warnings actually found. This way calling programs can take the return value from the shell and act on it. (A short sketch of this convention follows this record.) | melexis/warnings-plugin | diff --git a/tests/test_limits.py b/tests/test_limits.py
index 7a6b1c9..9e477a8 100644
--- a/tests/test_limits.py
+++ b/tests/test_limits.py
@@ -45,7 +45,7 @@ class TestLimits(TestCase):
warnings.check('testfile.c:12: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 2)
warnings.set_maximum(1)
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
warnings.set_maximum(2)
self.assertEqual(warnings.return_check_limits(), 0)
@@ -56,7 +56,7 @@ class TestLimits(TestCase):
warnings.check('testfile.c:6: warning: group test: ignoring title "Some test functions" that does not match old title "Some freaky test functions"')
self.assertEqual(warnings.return_count(), 2)
# default behavior
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
# to set minimum we need to make maximum higher
warnings.set_maximum(10)
@@ -64,7 +64,7 @@ class TestLimits(TestCase):
if x <= 3:
self.assertEqual(warnings.return_check_limits(), 0)
else:
- self.assertEqual(warnings.return_check_limits(), 1)
+ self.assertEqual(warnings.return_check_limits(), 2)
warnings.set_minimum(x)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/melexis/warnings-plugin.git@e45c72adea46a8595cc426368e38090a7553f40c#egg=mlx.warnings
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: warnings-plugin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/warnings-plugin
| [
"tests/test_limits.py::TestLimits::test_return_values_maximum_increase",
"tests/test_limits.py::TestLimits::test_return_values_minimum_increase"
]
| []
| [
"tests/test_limits.py::TestLimits::test_return_values_maximum_decrease",
"tests/test_limits.py::TestLimits::test_set_maximum",
"tests/test_limits.py::TestLimits::test_set_minimum",
"tests/test_limits.py::TestLimits::test_set_minimum_fail"
]
| []
| Apache License 2.0 | 1,420 | [
"README.rst",
"src/mlx/warnings.py",
"docs/conf.py"
]
| [
"README.rst",
"src/mlx/warnings.py",
"docs/conf.py"
]
|
|
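The warnings-plugin record above changes the limit check to return the warning count instead of a bare 1. A short sketch of that convention, with illustrative names rather than the plugin's real entry point:

```python
import sys


def check_limits(count, warn_min=0, warn_max=0):
    """Return 0 when count is within [warn_min, warn_max], else the count itself."""
    if warn_min <= count <= warn_max:
        return 0
    return count


if __name__ == '__main__':
    # A caller can now read the number of warnings from the exit status
    # (the shell's $? variable) instead of a plain pass/fail flag.
    sys.exit(check_limits(count=3, warn_max=2))  # shell sees exit status 3
```

One caveat of this design: POSIX exit statuses are truncated to 0-255, so very large warning counts wrap around when read back from `$?`.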
zopefoundation__zope.app.onlinehelp-6 | 0cd750a10af18f630a3dd3560bbd3139a438360e | 2017-06-30 18:41:05 | 0cd750a10af18f630a3dd3560bbd3139a438360e | diff --git a/CHANGES.rst b/CHANGES.rst
index f212edd..b9ae31f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -2,11 +2,18 @@
CHANGES
=========
-4.0.2 (unreleased)
+4.1.0 (unreleased)
==================
-- Nothing changed yet.
+- The help namespace no longer modifies the global help object on
+ traversal. Instead it returns a new proxy object. This makes it
+ thread-safe. See `issue 4
+ <https://github.com/zopefoundation/zope.app.onlinehelp/issues/4>`_.
+- ``getTopicFor`` now really returns the first found topic in the
+ event that the object implements multiple interfaces that have
+ registered topics for the given view. Previously it would return the
+ topic for the least-specific interface.
4.0.1 (2017-05-21)
==================
diff --git a/src/zope/app/onlinehelp/__init__.py b/src/zope/app/onlinehelp/__init__.py
index 9998679..5ee40bf 100644
--- a/src/zope/app/onlinehelp/__init__.py
+++ b/src/zope/app/onlinehelp/__init__.py
@@ -20,9 +20,10 @@ __docformat__ = 'restructuredtext'
import os
-import zope
from zope.component import getUtilitiesFor
from zope.interface import providedBy
+from zope.proxy import ProxyBase
+from zope.proxy import non_overridable
from zope.testing import cleanup
from zope.app.onlinehelp.interfaces import IOnlineHelpTopic
@@ -34,6 +35,25 @@ path = os.path.join(os.path.dirname(__file__),
'help', 'welcome.stx')
globalhelp = OnlineHelp('Online Help', path)
+class _TraversedOnlineHelpProxy(ProxyBase):
+ """
+ A proxy around the globalhelp object that is returned when we
+ traverse through the helpNamespace.
+
+ It adds the ``context`` attribute to the context that was traversed
+ through.
+ """
+ __slots__ = ('context',)
+
+ def __init__(self, context):
+ self.context = context
+ ProxyBase.__init__(self, globalhelp)
+
+ @non_overridable
+ def __reduce__(self, proto=None):
+ raise TypeError("Not picklable")
+ __reduce_ex__ = __reduce__
+
class helpNamespace(object):
""" help namespace handler """
@@ -42,12 +62,14 @@ class helpNamespace(object):
self.context = context
def traverse(self, name, ignored):
- """Used to traverse to an online help topic.
- Returns the global `OnlineHelp` instance with the traversal
- context.
"""
- globalhelp.context = self.context
- return globalhelp
+ Used to traverse to an online help topic.
+
+ Returns a proxy for the global :class::`~.OnlineHelp` instance
+ with the traversal context.
+ """
+ return _TraversedOnlineHelpProxy(self.context)
+
def getTopicFor(obj, view=None):
"""Determine topic for an object and optionally a view.
@@ -115,19 +137,45 @@ def getTopicFor(obj, view=None):
>>> getTopicFor(Dummy2()) is None
True
- """
- topic = None
- for interface in providedBy(obj):
- for t in getUtilitiesFor(IOnlineHelpTopic):
- if t[1].interface==interface and t[1].view==view:
- topic = t[1]
- break
+ If there is a second interface also provided with the same
+ view name and registered for that interface, still only the first
+ topic will be found.
+
+ >>> from zope.interface import Interface, implementer, alsoProvides
+ >>> class I3(Interface):
+ ... pass
+ >>> @implementer(I3)
+ ... class Dummy3(object):
+ ... pass
+
+ >>> path = os.path.join(testdir(), 'help2.txt')
+ >>> onlinehelp.registerHelpTopic('a', 'help3', 'Help 3',
+ ... path, I3, None)
- return topic
+ >>> getTopicFor(Dummy3()).title
+ 'Help 3'
+ >>> getTopicFor(Dummy1()).title
+ 'Help 2'
+ >>> @implementer(I1, I3)
+ ... class Dummy4(object):
+ ... pass
+ >>> getTopicFor(Dummy4()).title
+ 'Help 2'
+
+ >>> @implementer(I3, I1)
+ ... class Dummy5(object):
+ ... pass
+ >>> getTopicFor(Dummy5()).title
+ 'Help 3'
+
+ """
+ for interface in providedBy(obj):
+ for _name, topic in getUtilitiesFor(IOnlineHelpTopic):
+ if topic.interface == interface and topic.view == view:
+ return topic
def _clear():
- global globalhelp
globalhelp.__init__(globalhelp.title, globalhelp.path)
diff --git a/src/zope/app/onlinehelp/configure.zcml b/src/zope/app/onlinehelp/configure.zcml
index e68c781..3fa7e8e 100644
--- a/src/zope/app/onlinehelp/configure.zcml
+++ b/src/zope/app/onlinehelp/configure.zcml
@@ -14,6 +14,11 @@
/>
</class>
+ <class class="._TraversedOnlineHelpProxy">
+ <require
+ like_class=".onlinehelp.OnlineHelp" />
+ </class>
+
<!-- this is the generic help topic implementation -->
<class class=".onlinehelptopic.OnlineHelpTopic">
<require
diff --git a/src/zope/app/onlinehelp/metaconfigure.py b/src/zope/app/onlinehelp/metaconfigure.py
index 3fd867e..58918f6 100644
--- a/src/zope/app/onlinehelp/metaconfigure.py
+++ b/src/zope/app/onlinehelp/metaconfigure.py
@@ -2,21 +2,20 @@
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
-#
+#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
-#
+#
##############################################################################
"""Meta-Configuration Handlers for "help" namespace.
These handlers process the `registerTopic()` directive of
the "help" ZCML namespace.
-$Id$
"""
__docformat__ = 'restructuredtext'
@@ -25,7 +24,7 @@ from zope.app.onlinehelp import globalhelp
class OnlineHelpTopicDirective(object):
- def __init__(self, _context, id, title, parent="", doc_path=None,
+ def __init__(self, _context, id, title, parent="", doc_path=None,
for_=None, view=None, class_=None, resources=None):
self._context = _context
self.id = id
@@ -36,7 +35,7 @@ class OnlineHelpTopicDirective(object):
self.view = view
self.class_ = class_
self.resources = resources
-
+
def _args(self):
return (self.parent, self.id, self.title, self.doc_path, self.for_,
self.view, self.class_, self.resources)
| The ++help namespace is not thread-safe
When you traverse the `++help` namespace, it [mutates a global object](https://github.com/zopefoundation/zope.app.onlinehelp/blob/master/src/zope/app/onlinehelp/__init__.py#L49), which is [clearly an issue if threads are involved](https://github.com/zopefoundation/zope.app.onlinehelp/pull/2#discussion_r116946254). Further, it likely leaves that global object holding a reference to a persistent object, which is Bad once the transaction ends and the ZODB Connection is closed or reused. (A generic sketch of the per-traversal wrapper pattern follows this record.) | zopefoundation/zope.app.onlinehelp | diff --git a/CHANGES.rst b/CHANGES.rst
index cfe0b29..14e6da0 100644
--- a/src/zope/app/onlinehelp/tests/test_onlinehelp.py
+++ b/src/zope/app/onlinehelp/tests/test_onlinehelp.py
@@ -70,9 +70,27 @@ class TestOnlineHelp(unittest.TestCase):
'path that does not exist')
+class TestOnlineHelpNamespace(unittest.TestCase):
+
+ def test_context(self):
+ from zope.app.onlinehelp import globalhelp
+ from zope.app.onlinehelp import helpNamespace
+
+ traversed = helpNamespace(self).traverse(None, None)
+ self.assertIs(traversed.context, self)
+ self.assertIsNone(getattr(globalhelp, 'context', None))
+
+ def test_cannot_pickle(self):
+ from zope.app.onlinehelp import helpNamespace
+ import pickle
+
+ traversed = helpNamespace(self).traverse(None, None)
+ self.assertRaises(TypeError,
+ pickle.dumps, traversed)
+
+
def testdir():
- import zope.app.onlinehelp.tests
- return os.path.dirname(zope.app.onlinehelp.tests.__file__)
+ return os.path.dirname(__file__)
def setUp(tests):
testing.setUp()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"zope.testrunner",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
beautifulsoup4==4.12.3
BTrees==4.11.3
certifi==2021.5.30
cffi==1.15.1
docutils==0.18.1
importlib-metadata==4.8.3
iniconfig==1.1.1
multipart==1.1.0
packaging==21.3
persistent==4.9.3
pluggy==1.0.0
py==1.11.0
pycparser==2.21
pyparsing==3.1.4
pytest==7.0.1
python-gettext==4.1
pytz==2025.2
roman==3.3
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
transaction==3.1.0
typing_extensions==4.1.1
waitress==2.0.0
WebOb==1.8.9
WebTest==3.0.0
zc.lockfile==2.0
ZConfig==3.6.1
zdaemon==4.4
zipp==3.6.0
ZODB==5.8.1
zodbpickle==2.6
zope.annotation==4.8
zope.app.appsetup==4.2.0
zope.app.authentication==4.0.0
zope.app.basicskin==4.0.0
zope.app.component==4.1.0
zope.app.container==4.0.0
zope.app.content==4.0.0
zope.app.file==4.0.0
zope.app.folder==4.0.0
zope.app.form==5.1.0
zope.app.http==4.0.1
zope.app.localpermission==4.1.0
-e git+https://github.com/zopefoundation/zope.app.onlinehelp.git@0cd750a10af18f630a3dd3560bbd3139a438360e#egg=zope.app.onlinehelp
zope.app.pagetemplate==4.0.0
zope.app.preference==4.1.0
zope.app.principalannotation==4.0.0
zope.app.publication==4.5
zope.app.publisher==4.3.1
zope.app.renderer==4.1.0
zope.app.rotterdam==4.0.1
zope.app.security==5.1.0
zope.app.tree==4.1.0
zope.app.wsgi==4.4
zope.authentication==4.5.0
zope.browser==2.4
zope.browsermenu==4.4
zope.browserpage==4.4.0
zope.browserresource==4.4
zope.cachedescriptors==4.4
zope.component==5.1.0
zope.componentvocabulary==2.3.0
zope.configuration==4.4.1
zope.container==4.10
zope.contenttype==4.6
zope.copy==4.3
zope.copypastemove==4.2.1
zope.datetime==4.3.0
zope.deferredimport==4.4
zope.deprecation==4.4.0
zope.dottedname==5.0
zope.dublincore==4.3.0
zope.error==5.0
zope.event==4.6
zope.exceptions==4.6
zope.filerepresentation==5.0.0
zope.formlib==5.0.1
zope.hookable==5.4
zope.i18n==4.9.0
zope.i18nmessageid==5.1.1
zope.interface==5.5.2
zope.lifecycleevent==4.4
zope.location==4.3
zope.login==2.2
zope.minmax==2.3
zope.pagetemplate==4.6.0
zope.password==4.4
zope.pluggableauth==2.3.1
zope.preference==4.1.0
zope.principalannotation==4.4
zope.principalregistry==4.3
zope.processlifetime==2.4
zope.proxy==4.6.1
zope.ptresource==4.3.0
zope.publisher==6.1.0
zope.schema==6.2.1
zope.security==5.8
zope.securitypolicy==4.3.2
zope.session==4.5
zope.site==4.6.1
zope.size==4.4
zope.structuredtext==4.4
zope.tal==4.5
zope.tales==5.2
zope.testing==5.0.1
zope.testrunner==5.6
zope.traversing==4.4.1
| name: zope.app.onlinehelp
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- beautifulsoup4==4.12.3
- btrees==4.11.3
- cffi==1.15.1
- docutils==0.18.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- multipart==1.1.0
- packaging==21.3
- persistent==4.9.3
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pyparsing==3.1.4
- pytest==7.0.1
- python-gettext==4.1
- pytz==2025.2
- roman==3.3
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- transaction==3.1.0
- typing-extensions==4.1.1
- waitress==2.0.0
- webob==1.8.9
- webtest==3.0.0
- zc-lockfile==2.0
- zconfig==3.6.1
- zdaemon==4.4
- zipp==3.6.0
- zodb==5.8.1
- zodbpickle==2.6
- zope-annotation==4.8
- zope-app-appsetup==4.2.0
- zope-app-authentication==4.0.0
- zope-app-basicskin==4.0.0
- zope-app-component==4.1.0
- zope-app-container==4.0.0
- zope-app-content==4.0.0
- zope-app-file==4.0.0
- zope-app-folder==4.0.0
- zope-app-form==5.1.0
- zope-app-http==4.0.1
- zope-app-localpermission==4.1.0
- zope-app-pagetemplate==4.0.0
- zope-app-preference==4.1.0
- zope-app-principalannotation==4.0.0
- zope-app-publication==4.5
- zope-app-publisher==4.3.1
- zope-app-renderer==4.1.0
- zope-app-rotterdam==4.0.1
- zope-app-security==5.1.0
- zope-app-tree==4.1.0
- zope-app-wsgi==4.4
- zope-authentication==4.5.0
- zope-browser==2.4
- zope-browsermenu==4.4
- zope-browserpage==4.4.0
- zope-browserresource==4.4
- zope-cachedescriptors==4.4
- zope-component==5.1.0
- zope-componentvocabulary==2.3.0
- zope-configuration==4.4.1
- zope-container==4.10
- zope-contenttype==4.6
- zope-copy==4.3
- zope-copypastemove==4.2.1
- zope-datetime==4.3.0
- zope-deferredimport==4.4
- zope-deprecation==4.4.0
- zope-dottedname==5.0
- zope-dublincore==4.3.0
- zope-error==5.0
- zope-event==4.6
- zope-exceptions==4.6
- zope-filerepresentation==5.0.0
- zope-formlib==5.0.1
- zope-hookable==5.4
- zope-i18n==4.9.0
- zope-i18nmessageid==5.1.1
- zope-interface==5.5.2
- zope-lifecycleevent==4.4
- zope-location==4.3
- zope-login==2.2
- zope-minmax==2.3
- zope-pagetemplate==4.6.0
- zope-password==4.4
- zope-pluggableauth==2.3.1
- zope-preference==4.1.0
- zope-principalannotation==4.4
- zope-principalregistry==4.3
- zope-processlifetime==2.4
- zope-proxy==4.6.1
- zope-ptresource==4.3.0
- zope-publisher==6.1.0
- zope-schema==6.2.1
- zope-security==5.8
- zope-securitypolicy==4.3.2
- zope-session==4.5
- zope-site==4.6.1
- zope-size==4.4
- zope-structuredtext==4.4
- zope-tal==4.5
- zope-tales==5.2
- zope-testing==5.0.1
- zope-testrunner==5.6
- zope-traversing==4.4.1
prefix: /opt/conda/envs/zope.app.onlinehelp
| [
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::TestOnlineHelpNamespace::test_cannot_pickle",
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::TestOnlineHelpNamespace::test_context"
]
| []
| [
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::TestOnlineHelpResource::test_size",
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::TestBaseOnlineHelpTopic::test_bad_path",
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::TestOnlineHelp::test_bad_path",
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::testdir",
"src/zope/app/onlinehelp/tests/test_onlinehelp.py::test_suite"
]
| []
| null | 1,421 | [
"src/zope/app/onlinehelp/configure.zcml",
"src/zope/app/onlinehelp/__init__.py",
"src/zope/app/onlinehelp/metaconfigure.py",
"CHANGES.rst"
]
| [
"src/zope/app/onlinehelp/configure.zcml",
"src/zope/app/onlinehelp/__init__.py",
"src/zope/app/onlinehelp/metaconfigure.py",
"CHANGES.rst"
]
|
|
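The zope.app.onlinehelp record above replaces per-request mutation of a shared global with a per-traversal proxy. The sketch below shows the general shape of that fix in plain Python; the class and attribute names are illustrative and this is not the actual `zope.proxy`-based implementation.

```python
class GlobalHelp(object):
    """Stand-in for the module-level `globalhelp` object."""
    title = 'Online Help'


GLOBAL_HELP = GlobalHelp()  # shared across threads, never mutated per request


class TraversedHelp(object):
    """Per-traversal wrapper that carries the context itself."""

    def __init__(self, context):
        self.context = context   # request-local state lives on the wrapper
        self._target = GLOBAL_HELP

    def __getattr__(self, name):
        # Anything not defined on the wrapper is delegated to the global.
        return getattr(self._target, name)


def traverse(context):
    # Each traversal (and therefore each thread) gets its own wrapper, so one
    # request can never observe another request's context.
    return TraversedHelp(context)


print(traverse('some-container').title)    # -> Online Help
print(traverse('some-container').context)  # -> some-container
```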
google__mobly-238 | 310791d8d2faa9f554fddb0a5ce11ae249156bb4 | 2017-06-30 19:50:04 | 31dcff279d4808e011f6af8ab0661b9750357cda | dthkao:
---
*[mobly/base_test.py, line 598 at r1](https://reviewable.io:443/reviews/google/mobly/238#-Knubf1cLSPD58iF-quR:-Knubf1dAyi-v2lyZAv_:b-lxayl5) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/base_test.py#L598)):*
> ```Python
> # Skip all tests peacefully.
> e.details = 'Test class aborted due to: %s' % e.details
> for test_name in self._get_all_test_names():
> ```
This loop reappears below. Factor out? Also would the third version of it (which checks for execution) be enough for all three usages?
---
*[mobly/records.py, line 297 at r1](https://reviewable.io:443/reviews/google/mobly/238#-KnucBh9qyNSnandAWAA:-KnucBh9qyNSnandAWAB:b-looy6t) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/records.py#L297)):*
> ```Python
> test_record: A TestResultRecord object for the test class.
> """
> self.error.append(test_record)
> ```
How does this change the behavior? Can the eventual squashed message of the issue tracking explain what is changing better?
---
xpconanfan:
---
*[mobly/base_test.py, line 598 at r1](https://reviewable.io:443/reviews/google/mobly/238#-Knubf1cLSPD58iF-quR:-KnueMf5oRp9gFSuuo4E:b-896fix) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/base_test.py#L598)):*
> Previously, dthkao (David T.H. Kao) wrote…
> This loop reappears below. Factor out? Also would the third version of it (which checks for execution) be enough for all three usages?
Done.
---
*[mobly/records.py, line 297 at r1](https://reviewable.io:443/reviews/google/mobly/238#-KnucBh9qyNSnandAWAA:-Knucpn9CcYDquEHggaj:b3x9rtu) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/records.py#L297)):*
> Previously, dthkao (David T.H. Kao) wrote…
> How does this change the behavior? Can the eventual squashed message of the issue tracking explain what is changing better?
`fail` is defined as explicit failure by throwing signals.TestFailure, other things like setup_failure should be considered error.
---
dthkao:
---
*[mobly/records.py, line 297 at r1](https://reviewable.io:443/reviews/google/mobly/238#-KnucBh9qyNSnandAWAA:-KoO-QzC7UZAppGueV4P:b-2egkbq) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/records.py#L297)):*
> Previously, xpconanfan (Ang Li) wrote…
> `fail` is defined as explicit failure by throwing signals.TestFailure, other things like setup_failure should be considered error.
I meant how does this (appending to error instead of executed) change the summary json?
---
*[mobly/records.py, line 309 at r2](https://reviewable.io:443/reviews/google/mobly/238#-KoO-fmMHOvIdDsPppMO:-KoO-fmMHOvIdDsPppMP:b-xo747k) ([raw file](https://github.com/google/mobly/blob/20f609c9ae1d0f34b1feed91a461f09d820db6f4/mobly/records.py#L309)):*
> ```Python
> False otherwise.
> """
> for record in self.executed:
> ```
minor: could be a list comprehension, right?
something like:
matches = [x for x in self.executed if x.test_name == test_name]
return len(matches)>0
---
xpconanfan:
---
*[mobly/records.py, line 309 at r2](https://reviewable.io:443/reviews/google/mobly/238#-KoO-fmMHOvIdDsPppMO:-KoO0_KphsyjCeZgOlLY:b-jgdz5e) ([raw file](https://github.com/google/mobly/blob/20f609c9ae1d0f34b1feed91a461f09d820db6f4/mobly/records.py#L309)):*
> Previously, dthkao (David T.H. Kao) wrote…
> minor: could be a list comprehension, right?
> something like:
> matches = [x for x in self.executed if x.test_name == test_name]
> return len(matches)>0
what's the benefit of that?
we'd loop through the whole list every time instead of returning as long as we find a match.
seems less efficient?
---
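A neutral sketch of the alternative this exchange is circling: a generator fed to `any()` keeps the early return the second reviewer wants while staying a one-liner. The free-function form and the `executed` argument are illustrative; the real method lives on the result class.

```python
def is_test_executed(executed, test_name):
    # any() with a generator short-circuits on the first match, so it keeps
    # the early exit without building an intermediate list.
    return any(record.test_name == test_name for record in executed)
```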
xpconanfan:
---
*[mobly/records.py, line 297 at r1](https://reviewable.io:443/reviews/google/mobly/238#-KnucBh9qyNSnandAWAA:-KoO2R8FZIzsiQfZgxCs:b-bk5w0m) ([raw file](https://github.com/google/mobly/blob/6d55796147bc2f537531ca28a8c475fbfd501487/mobly/records.py#L297)):*
> Previously, dthkao (David T.H. Kao) wrote…
> I meant how does this (appending to error instead of executed) change the summary json?
Oh, I'm thinking, if `setup_class` failed, no test was executed.
Hence it does not seem appropriate to append to `executed` tally.
---
| diff --git a/mobly/records.py b/mobly/records.py
index ae550d9..6c5efe2 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -262,11 +262,12 @@ class TestResult(object):
Args:
record: A test record object to add.
"""
+ if record.result == TestResultEnums.TEST_RESULT_SKIP:
+ self.skipped.append(record)
+ return
self.executed.append(record)
if record.result == TestResultEnums.TEST_RESULT_FAIL:
self.failed.append(record)
- elif record.result == TestResultEnums.TEST_RESULT_SKIP:
- self.skipped.append(record)
elif record.result == TestResultEnums.TEST_RESULT_PASS:
self.passed.append(record)
else:
@@ -283,14 +284,32 @@ class TestResult(object):
self.controller_info[name] = info
def fail_class(self, test_record):
- """Add a record to indicate a test class setup has failed and no test
- in the class was executed.
+ """Add a record to indicate a test class has failed before any test
+ could execute.
+
+ This is only called before any test is actually executed. So it only
+ adds an error entry that describes why the class failed to the tally
+ and does not affect the total number of tests requrested or exedcuted.
Args:
test_record: A TestResultRecord object for the test class.
"""
- self.executed.append(test_record)
- self.failed.append(test_record)
+ self.error.append(test_record)
+
+ def is_test_executed(self, test_name):
+ """Checks if a specific test has been executed.
+
+ Args:
+ test_name: string, the name of the test to check.
+
+ Returns:
+ True if the test has been executed according to the test result,
+ False otherwise.
+ """
+ for record in self.executed:
+ if record.test_name == test_name:
+ return True
+ return False
@property
def is_all_pass(self):
| Properly report skipped test classes
Now that we have a way to reliably get all the tests in a class, including the generated tests, the test report for an aborted class should include entries for all the tests requested, instead of only one entry for the class. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index a83334d..b5176a4 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -344,6 +344,12 @@ class BaseTestClass(object):
test_method(*args, **kwargs)
else:
test_method()
+ except signals.TestPass as e:
+ raise e
+ except Exception as e:
+ logging.exception('Exception occurred in %s.',
+ self.current_test_name)
+ raise e
finally:
try:
self._teardown_test(test_name)
@@ -354,7 +360,6 @@ class BaseTestClass(object):
tr_record.add_error('teardown_test', e)
self._exec_procedure_func(self._on_fail, tr_record)
except (signals.TestFailure, AssertionError) as e:
- logging.exception(e)
tr_record.test_fail(e)
self._exec_procedure_func(self._on_fail, tr_record)
except signals.TestSkip as e:
@@ -374,7 +379,6 @@ class BaseTestClass(object):
is_generate_trigger = True
self.results.requested.remove(test_name)
except Exception as e:
- logging.exception(e)
# Exception happened during test.
tr_record.test_error(e)
self._exec_procedure_func(self._on_fail, tr_record)
@@ -547,6 +551,22 @@ class BaseTestClass(object):
test_methods.append((test_name, test_method))
return test_methods
+ def _skip_remaining_tests(self, exception):
+ """Marks any requested test that has not been executed in a class as
+ skipped.
+
+ This is useful for handling abort class signal.
+
+ Args:
+ exception: The exception object that was thrown to trigger the
+ skip.
+ """
+ for test_name in self.results.requested:
+ if not self.results.is_test_executed(test_name):
+ test_record = records.TestResultRecord(test_name, self.TAG)
+ test_record.test_skip(exception)
+ self.results.add_record(test_record)
+
def run(self, test_names=None):
"""Runs tests within a test class.
@@ -591,21 +611,33 @@ class BaseTestClass(object):
# Setup for the class.
try:
self._setup_class()
+ except signals.TestAbortClass as e:
+ # The test class is intentionally aborted.
+ # Skip all tests peacefully.
+ e.details = 'Test class aborted due to: %s' % e.details
+ self._skip_remaining_tests(e)
+ return self.results
except Exception as e:
+ # Setup class failed for unknown reasons.
+ # Fail the class and skip all tests.
logging.exception('Failed to setup %s.', self.TAG)
class_record = records.TestResultRecord('setup_class', self.TAG)
class_record.test_begin()
class_record.test_fail(e)
self._exec_procedure_func(self._on_fail, class_record)
self.results.fail_class(class_record)
- self._safe_exec_func(self.teardown_class)
+ self._skip_remaining_tests(e)
return self.results
+ finally:
+ self._safe_exec_func(self.teardown_class)
# Run tests in order.
try:
for test_name, test_method in tests:
self.exec_one_test(test_name, test_method)
return self.results
- except signals.TestAbortClass:
+ except signals.TestAbortClass as e:
+ e.details = 'Test class aborted due to: %s' % e.details
+ self._skip_remaining_tests(e)
return self.results
except signals.TestAbortAll as e:
# Piggy-back test results on this exception object so we don't lose
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 0ed05f7..c9d517b 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -168,6 +168,10 @@ class BaseTestTest(unittest.TestCase):
# This should not execute because setup_class failed.
never_call()
+ def test_something2(self):
+ # This should not execute because setup_class failed.
+ never_call()
+
def teardown_class(self):
# This should execute because the setup_class failure should
# have already been recorded.
@@ -179,12 +183,12 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
- actual_record = bt_cls.results.failed[0]
+ actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, "setup_class")
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
- expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, "
- "Requested 1, Skipped 0")
+ expected_summary = ("Error 1, Executed 0, Failed 0, Passed 0, "
+ "Requested 2, Skipped 2")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
teardown_class_call_check.assert_called_once_with("heehee")
on_fail_call_check.assert_called_once_with("haha")
@@ -526,7 +530,33 @@ class BaseTestTest(unittest.TestCase):
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
- def test_abort_class(self):
+ def test_abort_setup_class(self):
+ """A class was intentionally aborted by the test.
+
+ This is not considered an error as the abort class is used as a skip
+ signal for the entire class, which is different from raising other
+ exceptions in `setup_class`.
+ """
+ class MockBaseTest(base_test.BaseTestClass):
+ def setup_class(self):
+ asserts.abort_class(MSG_EXPECTED_EXCEPTION)
+
+ def test_1(self):
+ never_call()
+
+ def test_2(self):
+ never_call()
+
+ def test_3(self):
+ never_call()
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run(test_names=["test_1", "test_2", "test_3"])
+ self.assertEqual(bt_cls.results.summary_str(),
+ ("Error 0, Executed 0, Failed 0, Passed 0, "
+ "Requested 3, Skipped 3"))
+
+ def test_abort_class_in_test(self):
class MockBaseTest(base_test.BaseTestClass):
def test_1(self):
pass
@@ -545,7 +575,7 @@ class BaseTestTest(unittest.TestCase):
MSG_EXPECTED_EXCEPTION)
self.assertEqual(bt_cls.results.summary_str(),
("Error 0, Executed 2, Failed 1, Passed 1, "
- "Requested 3, Skipped 0"))
+ "Requested 3, Skipped 1"))
def test_uncaught_exception(self):
class MockBaseTest(base_test.BaseTestClass):
@@ -958,12 +988,12 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
- actual_record = bt_cls.results.failed[0]
+ actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, "setup_generated_tests")
self.assertEqual(
actual_record.details,
'Test name "ha" already exists, cannot be duplicated!')
- expected_summary = ("Error 0, Executed 1, Failed 1, Passed 0, "
+ expected_summary = ("Error 1, Executed 0, Failed 0, Passed 0, "
"Requested 0, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index d74c00b..9500d1d 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -219,8 +219,8 @@ class RecordsTest(unittest.TestCase):
record2 = records.TestResultRecord("SomeTest", s)
tr.fail_class(record2)
self.assertEqual(len(tr.passed), 1)
- self.assertEqual(len(tr.failed), 1)
- self.assertEqual(len(tr.executed), 2)
+ self.assertEqual(len(tr.error), 1)
+ self.assertEqual(len(tr.executed), 1)
def test_result_fail_class_with_special_error(self):
"""Call TestResult.fail_class with an error class that requires more
@@ -241,8 +241,8 @@ class RecordsTest(unittest.TestCase):
record2 = records.TestResultRecord("SomeTest", se)
tr.fail_class(record2)
self.assertEqual(len(tr.passed), 1)
- self.assertEqual(len(tr.failed), 1)
- self.assertEqual(len(tr.executed), 2)
+ self.assertEqual(len(tr.error), 1)
+ self.assertEqual(len(tr.executed), 1)
def test_is_all_pass(self):
s = signals.TestPass(self.details, self.float_extra)
@@ -284,6 +284,15 @@ class RecordsTest(unittest.TestCase):
tr.fail_class(record1)
self.assertFalse(tr.is_all_pass)
+ def test_is_test_executed(self):
+ record1 = records.TestResultRecord(self.tn)
+ record1.test_begin()
+ record1.test_fail(Exception("haha"))
+ tr = records.TestResult()
+ tr.add_record(record1)
+ self.assertTrue(tr.is_test_executed(record1.test_name))
+ self.assertFalse(tr.is_test_executed(self.tn + 'ha'))
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@310791d8d2faa9f554fddb0a5ce11ae249156bb4#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/records_test.py::RecordsTest::test_is_test_executed",
"tests/mobly/records_test.py::RecordsTest::test_result_fail_class_with_special_error",
"tests/mobly/records_test.py::RecordsTest::test_result_fail_class_with_test_signal"
]
| []
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_fail_class",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra"
]
| []
| Apache License 2.0 | 1,422 | [
"mobly/records.py"
]
| [
"mobly/records.py"
]
|
Pylons__webob-332 | b2e78a53af7abe866b90a532479cf5c0ae00301b | 2017-06-30 20:28:31 | b2e78a53af7abe866b90a532479cf5c0ae00301b | diff --git a/CHANGES.txt b/CHANGES.txt
index 4b5784a..ce5397f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -44,6 +44,10 @@ These features are experimental and may change at any point in the future.
Bugfix
~~~~~~
+- Request.host_url, Request.host_port, Request.domain correctly parse IPv6 Host
+ headers as provided by a browser. See
+ https://github.com/Pylons/webob/pull/332
+
- Request.authorization would raise ValueError for unusual or malformed header
values. See https://github.com/Pylons/webob/issues/231
diff --git a/src/webob/request.py b/src/webob/request.py
index 011617c..b9f45d9 100644
--- a/src/webob/request.py
+++ b/src/webob/request.py
@@ -413,8 +413,8 @@ class BaseRequest(object):
e = self.environ
host = e.get('HTTP_HOST')
if host is not None:
- if ':' in host:
- host, port = host.split(':', 1)
+ if ':' in host and host[-1] != ']':
+ host, port = host.rsplit(':', 1)
else:
url_scheme = e['wsgi.url_scheme']
if url_scheme == 'https':
@@ -435,8 +435,8 @@ class BaseRequest(object):
url = scheme + '://'
host = e.get('HTTP_HOST')
if host is not None:
- if ':' in host:
- host, port = host.split(':', 1)
+ if ':' in host and host[-1] != ']':
+ host, port = host.rsplit(':', 1)
else:
port = None
else:
@@ -667,8 +667,8 @@ class BaseRequest(object):
.. code-block:: python
domain = request.host
- if ':' in domain:
- domain = domain.split(':', 1)[0]
+ if ':' in domain and domain[-1] != ']': # Check for ] because of IPv6
+ domain = domain.rsplit(':', 1)[0]
This will be equivalent to the domain portion of the ``HTTP_HOST``
value in the environment if it exists, or the ``SERVER_NAME`` value in
@@ -680,8 +680,8 @@ class BaseRequest(object):
value use :meth:`webob.request.Request.host` instead.
"""
domain = self.host
- if ':' in domain:
- domain = domain.split(':', 1)[0]
+ if ':' in domain and domain[-1] != ']':
+ domain = domain.rsplit(':', 1)[0]
return domain
@property
| IPv6 support
Parts of WebOb haven't been adapted to work in an IPv6 environment:
- request.domain will split IPv6 addresses incorrectly
- request.host_port will split IPv6 addresses incorrectly
- request.host_url will split IPv6 addresses incorrectly
- .. maybe more places, I haven't checked all sources
This issue is similar to #174; however, that one is about mitigating potential vulnerabilities, while this issue is only about making the API work correctly. Software using WebOb can work around this by parsing the Host header manually, but that is wasteful because everybody has to repeat the same work, which should be done by WebOb itself (as promised by the API).
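For reference, a minimal standalone sketch (plain Python; the header value is illustrative) of why a first-colon split mangles a bracketed IPv6 Host header, and how the bracket-aware `rsplit` used in the patch above separates address and port:

```python
host = '[2001:DB8::1]:6453'  # Host header a browser sends for an IPv6 literal with a port

# Naive parsing splits at the first colon, which falls inside the address itself:
host.split(':', 1)           # ['[2001', 'DB8::1]:6453']  -- wrong

# Bracket-aware parsing: only split when the header does not end with ']'
# (i.e. a port is present), and split from the right so the address stays intact.
if ':' in host and host[-1] != ']':
    domain, port = host.rsplit(':', 1)
else:
    domain, port = host, None

assert domain == '[2001:DB8::1]' and port == '6453'
```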
| Pylons/webob | diff --git a/tests/test_request.py b/tests/test_request.py
index 85e6047..c0f932d 100644
--- a/tests/test_request.py
+++ b/tests/test_request.py
@@ -1639,6 +1639,16 @@ class TestBaseRequest(object):
req = self._makeOne(environ)
assert req.domain == 'example.com'
+ def test_domain_with_ipv6(self):
+ environ = {'HTTP_HOST': '[2001:DB8::1]:6453'}
+ req = self._makeOne(environ)
+ assert req.domain == '[2001:DB8::1]'
+
+ def test_domain_with_ipv6_no_port(self):
+ environ = {'HTTP_HOST': '[2001:DB8::1]'}
+ req = self._makeOne(environ)
+ assert req.domain == '[2001:DB8::1]'
+
def test_encget_raises_without_default(self):
inst = self._makeOne({})
with pytest.raises(KeyError):
@@ -1965,6 +1975,18 @@ class TestLegacyRequest(object):
req = self._makeOne(environ)
assert req.host_port == '4333'
+ def test_host_port_ipv6(self):
+ environ = {'HTTP_HOST': '[2001:DB8::1]:6453'}
+ req = self._makeOne(environ)
+ assert req.host_port == '6453'
+
+ def test_host_port_ipv6(self):
+ environ = {'wsgi.url_scheme': 'https',
+ 'HTTP_HOST': '[2001:DB8::1]'
+ }
+ req = self._makeOne(environ)
+ assert req.host_port == '443'
+
def test_host_url_w_http_host_and_no_port(self):
environ = {'wsgi.url_scheme': 'http',
'HTTP_HOST': 'example.com',
@@ -2015,6 +2037,20 @@ class TestLegacyRequest(object):
req = self._makeOne(environ)
assert req.host_url == 'https://example.com:4333'
+ def test_host_url_http_ipv6_host(self):
+ environ = {'wsgi.url_scheme': 'https',
+ 'HTTP_HOST': '[2001:DB8::1]:6453'
+ }
+ req = self._makeOne(environ)
+ assert req.host_url == 'https://[2001:DB8::1]:6453'
+
+ def test_host_url_http_ipv6_host_no_port(self):
+ environ = {'wsgi.url_scheme': 'https',
+ 'HTTP_HOST': '[2001:DB8::1]'
+ }
+ req = self._makeOne(environ)
+ assert req.host_url == 'https://[2001:DB8::1]'
+
@py2only
def test_application_url_py2(self):
inst = self._blankOne('/%C3%AB')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[testing]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-xdist==3.6.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
-e git+https://github.com/Pylons/webob.git@b2e78a53af7abe866b90a532479cf5c0ae00301b#egg=WebOb
| name: webob
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
prefix: /opt/conda/envs/webob
| [
"tests/test_request.py::TestBaseRequest::test_domain_with_ipv6",
"tests/test_request.py::TestBaseRequest::test_domain_with_ipv6_no_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_ipv6"
]
| []
| [
"tests/test_request.py::TestRequestCommon::test_ctor_environ_getter_raises_WTF",
"tests/test_request.py::TestRequestCommon::test_ctor_wo_environ_raises_WTF",
"tests/test_request.py::TestRequestCommon::test_ctor_w_environ",
"tests/test_request.py::TestRequestCommon::test_ctor_w_non_utf8_charset",
"tests/test_request.py::TestRequestCommon::test_scheme",
"tests/test_request.py::TestRequestCommon::test_body_file_getter",
"tests/test_request.py::TestRequestCommon::test_body_file_getter_seekable",
"tests/test_request.py::TestRequestCommon::test_body_file_getter_cache",
"tests/test_request.py::TestRequestCommon::test_body_file_getter_unreadable",
"tests/test_request.py::TestRequestCommon::test_body_file_setter_w_bytes",
"tests/test_request.py::TestRequestCommon::test_body_file_setter_non_bytes",
"tests/test_request.py::TestRequestCommon::test_body_file_deleter",
"tests/test_request.py::TestRequestCommon::test_body_file_raw",
"tests/test_request.py::TestRequestCommon::test_body_file_seekable_input_not_seekable",
"tests/test_request.py::TestRequestCommon::test_body_file_seekable_input_is_seekable",
"tests/test_request.py::TestRequestCommon::test_urlvars_getter_w_paste_key",
"tests/test_request.py::TestRequestCommon::test_urlvars_getter_w_wsgiorg_key",
"tests/test_request.py::TestRequestCommon::test_urlvars_getter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_urlvars_setter_w_paste_key",
"tests/test_request.py::TestRequestCommon::test_urlvars_setter_w_wsgiorg_key",
"tests/test_request.py::TestRequestCommon::test_urlvars_setter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_urlvars_deleter_w_paste_key",
"tests/test_request.py::TestRequestCommon::test_urlvars_deleter_w_wsgiorg_key_non_empty_tuple",
"tests/test_request.py::TestRequestCommon::test_urlvars_deleter_w_wsgiorg_key_empty_tuple",
"tests/test_request.py::TestRequestCommon::test_urlvars_deleter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_urlargs_getter_w_paste_key",
"tests/test_request.py::TestRequestCommon::test_urlargs_getter_w_wsgiorg_key",
"tests/test_request.py::TestRequestCommon::test_urlargs_getter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_urlargs_setter_w_paste_key",
"tests/test_request.py::TestRequestCommon::test_urlargs_setter_w_wsgiorg_key",
"tests/test_request.py::TestRequestCommon::test_urlargs_setter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_urlargs_deleter_w_wsgiorg_key",
"tests/test_request.py::TestRequestCommon::test_urlargs_deleter_w_wsgiorg_key_empty",
"tests/test_request.py::TestRequestCommon::test_urlargs_deleter_wo_keys",
"tests/test_request.py::TestRequestCommon::test_cookies_empty_environ",
"tests/test_request.py::TestRequestCommon::test_cookies_is_mutable",
"tests/test_request.py::TestRequestCommon::test_cookies_w_webob_parsed_cookies_matching_source",
"tests/test_request.py::TestRequestCommon::test_cookies_w_webob_parsed_cookies_mismatched_source",
"tests/test_request.py::TestRequestCommon::test_set_cookies",
"tests/test_request.py::TestRequestCommon::test_body_getter",
"tests/test_request.py::TestRequestCommon::test_body_setter_None",
"tests/test_request.py::TestRequestCommon::test_body_setter_non_string_raises",
"tests/test_request.py::TestRequestCommon::test_body_setter_value",
"tests/test_request.py::TestRequestCommon::test_body_deleter_None",
"tests/test_request.py::TestRequestCommon::test_json_body",
"tests/test_request.py::TestRequestCommon::test_json_body_array",
"tests/test_request.py::TestRequestCommon::test_text_body",
"tests/test_request.py::TestRequestCommon::test__text_get_without_charset",
"tests/test_request.py::TestRequestCommon::test__text_set_without_charset",
"tests/test_request.py::TestRequestCommon::test_POST_not_POST_or_PUT",
"tests/test_request.py::TestRequestCommon::test_POST_existing_cache_hit",
"tests/test_request.py::TestRequestCommon::test_PUT_missing_content_type",
"tests/test_request.py::TestRequestCommon::test_PATCH_missing_content_type",
"tests/test_request.py::TestRequestCommon::test_POST_missing_content_type",
"tests/test_request.py::TestRequestCommon::test_POST_json_no_content_type",
"tests/test_request.py::TestRequestCommon::test_PUT_bad_content_type",
"tests/test_request.py::TestRequestCommon::test_POST_multipart",
"tests/test_request.py::TestRequestCommon::test_GET_reflects_query_string",
"tests/test_request.py::TestRequestCommon::test_GET_updates_query_string",
"tests/test_request.py::TestRequestCommon::test_cookies_wo_webob_parsed_cookies",
"tests/test_request.py::TestRequestCommon::test_copy_get",
"tests/test_request.py::TestRequestCommon::test_remove_conditional_headers_accept_encoding",
"tests/test_request.py::TestRequestCommon::test_remove_conditional_headers_if_modified_since",
"tests/test_request.py::TestRequestCommon::test_remove_conditional_headers_if_none_match",
"tests/test_request.py::TestRequestCommon::test_remove_conditional_headers_if_range",
"tests/test_request.py::TestRequestCommon::test_remove_conditional_headers_range",
"tests/test_request.py::TestRequestCommon::test_is_body_readable_POST",
"tests/test_request.py::TestRequestCommon::test_is_body_readable_PATCH",
"tests/test_request.py::TestRequestCommon::test_is_body_readable_GET",
"tests/test_request.py::TestRequestCommon::test_is_body_readable_unknown_method_and_content_length",
"tests/test_request.py::TestRequestCommon::test_is_body_readable_special_flag",
"tests/test_request.py::TestRequestCommon::test_cache_control_reflects_environ",
"tests/test_request.py::TestRequestCommon::test_cache_control_updates_environ",
"tests/test_request.py::TestRequestCommon::test_cache_control_set_dict",
"tests/test_request.py::TestRequestCommon::test_cache_control_set_object",
"tests/test_request.py::TestRequestCommon::test_cache_control_gets_cached",
"tests/test_request.py::TestRequestCommon::test_call_application_calls_application",
"tests/test_request.py::TestRequestCommon::test_call_application_provides_write",
"tests/test_request.py::TestRequestCommon::test_call_application_closes_iterable_when_mixed_w_write_calls",
"tests/test_request.py::TestRequestCommon::test_call_application_raises_exc_info",
"tests/test_request.py::TestRequestCommon::test_call_application_returns_exc_info",
"tests/test_request.py::TestRequestCommon::test_blank__method_subtitution",
"tests/test_request.py::TestRequestCommon::test_blank__ctype_in_env",
"tests/test_request.py::TestRequestCommon::test_blank__ctype_in_headers",
"tests/test_request.py::TestRequestCommon::test_blank__ctype_as_kw",
"tests/test_request.py::TestRequestCommon::test_blank__str_post_data_for_unsupported_ctype",
"tests/test_request.py::TestRequestCommon::test_blank__post_urlencoded",
"tests/test_request.py::TestRequestCommon::test_blank__post_multipart",
"tests/test_request.py::TestRequestCommon::test_blank__post_files",
"tests/test_request.py::TestRequestCommon::test_blank__post_file_w_wrong_ctype",
"tests/test_request.py::TestRequestCommon::test_from_bytes_extra_data",
"tests/test_request.py::TestRequestCommon::test_as_bytes_skip_body",
"tests/test_request.py::TestRequestCommon::test_charset_in_content_type",
"tests/test_request.py::TestRequestCommon::test_limited_length_file_repr",
"tests/test_request.py::TestRequestCommon::test_request_wrong_clen[False]",
"tests/test_request.py::TestRequestCommon::test_request_wrong_clen[True]",
"tests/test_request.py::TestBaseRequest::test_method",
"tests/test_request.py::TestBaseRequest::test_http_version",
"tests/test_request.py::TestBaseRequest::test_script_name",
"tests/test_request.py::TestBaseRequest::test_path_info",
"tests/test_request.py::TestBaseRequest::test_content_length_getter",
"tests/test_request.py::TestBaseRequest::test_content_length_setter_w_str",
"tests/test_request.py::TestBaseRequest::test_remote_user",
"tests/test_request.py::TestBaseRequest::test_remote_addr",
"tests/test_request.py::TestBaseRequest::test_query_string",
"tests/test_request.py::TestBaseRequest::test_server_name",
"tests/test_request.py::TestBaseRequest::test_server_port_getter",
"tests/test_request.py::TestBaseRequest::test_server_port_setter_with_string",
"tests/test_request.py::TestBaseRequest::test_uscript_name",
"tests/test_request.py::TestBaseRequest::test_upath_info",
"tests/test_request.py::TestBaseRequest::test_upath_info_set_unicode",
"tests/test_request.py::TestBaseRequest::test_content_type_getter_no_parameters",
"tests/test_request.py::TestBaseRequest::test_content_type_getter_w_parameters",
"tests/test_request.py::TestBaseRequest::test_content_type_setter_w_None",
"tests/test_request.py::TestBaseRequest::test_content_type_setter_existing_paramter_no_new_paramter",
"tests/test_request.py::TestBaseRequest::test_content_type_deleter_clears_environ_value",
"tests/test_request.py::TestBaseRequest::test_content_type_deleter_no_environ_value",
"tests/test_request.py::TestBaseRequest::test_headers_getter",
"tests/test_request.py::TestBaseRequest::test_headers_setter",
"tests/test_request.py::TestBaseRequest::test_no_headers_deleter",
"tests/test_request.py::TestBaseRequest::test_client_addr_xff_singleval",
"tests/test_request.py::TestBaseRequest::test_client_addr_xff_multival",
"tests/test_request.py::TestBaseRequest::test_client_addr_prefers_xff",
"tests/test_request.py::TestBaseRequest::test_client_addr_no_xff",
"tests/test_request.py::TestBaseRequest::test_client_addr_no_xff_no_remote_addr",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_and_no_port",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_and_standard_port",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_and_oddball_port",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_https_and_no_port",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_https_and_standard_port",
"tests/test_request.py::TestBaseRequest::test_host_port_w_http_host_https_and_oddball_port",
"tests/test_request.py::TestBaseRequest::test_host_port_wo_http_host",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_and_no_port",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_and_standard_port",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_and_oddball_port",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_https_and_no_port",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_https_and_standard_port",
"tests/test_request.py::TestBaseRequest::test_host_url_w_http_host_https_and_oddball_port",
"tests/test_request.py::TestBaseRequest::test_host_url_wo_http_host",
"tests/test_request.py::TestBaseRequest::test_application_url",
"tests/test_request.py::TestBaseRequest::test_path_url",
"tests/test_request.py::TestBaseRequest::test_path",
"tests/test_request.py::TestBaseRequest::test_path_qs_no_qs",
"tests/test_request.py::TestBaseRequest::test_path_qs_w_qs",
"tests/test_request.py::TestBaseRequest::test_url_no_qs",
"tests/test_request.py::TestBaseRequest::test_url_w_qs",
"tests/test_request.py::TestBaseRequest::test_relative_url_to_app_true_wo_leading_slash",
"tests/test_request.py::TestBaseRequest::test_relative_url_to_app_true_w_leading_slash",
"tests/test_request.py::TestBaseRequest::test_relative_url_to_app_false_other_w_leading_slash",
"tests/test_request.py::TestBaseRequest::test_relative_url_to_app_false_other_wo_leading_slash",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_empty",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_just_leading_slash",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_non_empty_no_pattern",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_non_empty_w_pattern_miss",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_non_empty_w_pattern_hit",
"tests/test_request.py::TestBaseRequest::test_path_info_pop_skips_empty_elements",
"tests/test_request.py::TestBaseRequest::test_path_info_peek_empty",
"tests/test_request.py::TestBaseRequest::test_path_info_peek_just_leading_slash",
"tests/test_request.py::TestBaseRequest::test_path_info_peek_non_empty",
"tests/test_request.py::TestBaseRequest::test_is_xhr_no_header",
"tests/test_request.py::TestBaseRequest::test_is_xhr_header_miss",
"tests/test_request.py::TestBaseRequest::test_is_xhr_header_hit",
"tests/test_request.py::TestBaseRequest::test_host_getter_w_HTTP_HOST",
"tests/test_request.py::TestBaseRequest::test_host_getter_wo_HTTP_HOST",
"tests/test_request.py::TestBaseRequest::test_host_setter",
"tests/test_request.py::TestBaseRequest::test_host_deleter_hit",
"tests/test_request.py::TestBaseRequest::test_host_deleter_miss",
"tests/test_request.py::TestBaseRequest::test_domain_nocolon",
"tests/test_request.py::TestBaseRequest::test_domain_withcolon",
"tests/test_request.py::TestBaseRequest::test_encget_raises_without_default",
"tests/test_request.py::TestBaseRequest::test_encget_doesnt_raises_with_default",
"tests/test_request.py::TestBaseRequest::test_encget_with_encattr",
"tests/test_request.py::TestBaseRequest::test_encget_with_encattr_latin_1",
"tests/test_request.py::TestBaseRequest::test_encget_no_encattr",
"tests/test_request.py::TestBaseRequest::test_relative_url",
"tests/test_request.py::TestBaseRequest::test_header_getter",
"tests/test_request.py::TestBaseRequest::test_json_body",
"tests/test_request.py::TestBaseRequest::test_host_get",
"tests/test_request.py::TestBaseRequest::test_host_get_w_no_http_host",
"tests/test_request.py::TestLegacyRequest::test_method",
"tests/test_request.py::TestLegacyRequest::test_http_version",
"tests/test_request.py::TestLegacyRequest::test_script_name",
"tests/test_request.py::TestLegacyRequest::test_path_info",
"tests/test_request.py::TestLegacyRequest::test_content_length_getter",
"tests/test_request.py::TestLegacyRequest::test_content_length_setter_w_str",
"tests/test_request.py::TestLegacyRequest::test_remote_user",
"tests/test_request.py::TestLegacyRequest::test_remote_addr",
"tests/test_request.py::TestLegacyRequest::test_query_string",
"tests/test_request.py::TestLegacyRequest::test_server_name",
"tests/test_request.py::TestLegacyRequest::test_server_port_getter",
"tests/test_request.py::TestLegacyRequest::test_server_port_setter_with_string",
"tests/test_request.py::TestLegacyRequest::test_uscript_name",
"tests/test_request.py::TestLegacyRequest::test_upath_info",
"tests/test_request.py::TestLegacyRequest::test_upath_info_set_unicode",
"tests/test_request.py::TestLegacyRequest::test_content_type_getter_no_parameters",
"tests/test_request.py::TestLegacyRequest::test_content_type_getter_w_parameters",
"tests/test_request.py::TestLegacyRequest::test_content_type_setter_w_None",
"tests/test_request.py::TestLegacyRequest::test_content_type_setter_existing_paramter_no_new_paramter",
"tests/test_request.py::TestLegacyRequest::test_content_type_deleter_clears_environ_value",
"tests/test_request.py::TestLegacyRequest::test_content_type_deleter_no_environ_value",
"tests/test_request.py::TestLegacyRequest::test_headers_getter",
"tests/test_request.py::TestLegacyRequest::test_headers_setter",
"tests/test_request.py::TestLegacyRequest::test_no_headers_deleter",
"tests/test_request.py::TestLegacyRequest::test_client_addr_xff_singleval",
"tests/test_request.py::TestLegacyRequest::test_client_addr_xff_multival",
"tests/test_request.py::TestLegacyRequest::test_client_addr_prefers_xff",
"tests/test_request.py::TestLegacyRequest::test_client_addr_no_xff",
"tests/test_request.py::TestLegacyRequest::test_client_addr_no_xff_no_remote_addr",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_and_no_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_and_standard_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_and_oddball_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_https_and_no_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_https_and_standard_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_w_http_host_https_and_oddball_port",
"tests/test_request.py::TestLegacyRequest::test_host_port_wo_http_host",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_and_no_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_and_standard_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_and_oddball_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_https_and_no_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_https_and_standard_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_w_http_host_https_and_oddball_port",
"tests/test_request.py::TestLegacyRequest::test_host_url_wo_http_host",
"tests/test_request.py::TestLegacyRequest::test_host_url_http_ipv6_host",
"tests/test_request.py::TestLegacyRequest::test_host_url_http_ipv6_host_no_port",
"tests/test_request.py::TestLegacyRequest::test_application_url",
"tests/test_request.py::TestLegacyRequest::test_path_url",
"tests/test_request.py::TestLegacyRequest::test_path",
"tests/test_request.py::TestLegacyRequest::test_path_qs_no_qs",
"tests/test_request.py::TestLegacyRequest::test_path_qs_w_qs",
"tests/test_request.py::TestLegacyRequest::test_url_no_qs",
"tests/test_request.py::TestLegacyRequest::test_url_w_qs",
"tests/test_request.py::TestLegacyRequest::test_relative_url_to_app_true_wo_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_relative_url_to_app_true_w_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_relative_url_to_app_false_other_w_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_relative_url_to_app_false_other_wo_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_empty",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_just_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_non_empty_no_pattern",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_non_empty_w_pattern_miss",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_non_empty_w_pattern_hit",
"tests/test_request.py::TestLegacyRequest::test_path_info_pop_skips_empty_elements",
"tests/test_request.py::TestLegacyRequest::test_path_info_peek_empty",
"tests/test_request.py::TestLegacyRequest::test_path_info_peek_just_leading_slash",
"tests/test_request.py::TestLegacyRequest::test_path_info_peek_non_empty",
"tests/test_request.py::TestLegacyRequest::test_is_xhr_no_header",
"tests/test_request.py::TestLegacyRequest::test_is_xhr_header_miss",
"tests/test_request.py::TestLegacyRequest::test_is_xhr_header_hit",
"tests/test_request.py::TestLegacyRequest::test_host_getter_w_HTTP_HOST",
"tests/test_request.py::TestLegacyRequest::test_host_getter_wo_HTTP_HOST",
"tests/test_request.py::TestLegacyRequest::test_host_setter",
"tests/test_request.py::TestLegacyRequest::test_host_deleter_hit",
"tests/test_request.py::TestLegacyRequest::test_host_deleter_miss",
"tests/test_request.py::TestLegacyRequest::test_encget_raises_without_default",
"tests/test_request.py::TestLegacyRequest::test_encget_doesnt_raises_with_default",
"tests/test_request.py::TestLegacyRequest::test_encget_with_encattr",
"tests/test_request.py::TestLegacyRequest::test_encget_no_encattr",
"tests/test_request.py::TestLegacyRequest::test_relative_url",
"tests/test_request.py::TestLegacyRequest::test_header_getter",
"tests/test_request.py::TestLegacyRequest::test_json_body",
"tests/test_request.py::TestLegacyRequest::test_host_get_w_http_host",
"tests/test_request.py::TestLegacyRequest::test_host_get_w_no_http_host",
"tests/test_request.py::TestRequestConstructorWarnings::test_ctor_w_unicode_errors",
"tests/test_request.py::TestRequestConstructorWarnings::test_ctor_w_decode_param_names",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_set",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_set_nonadhoc",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_get",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_get_missing",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_del",
"tests/test_request.py::TestRequestWithAdhocAttr::test_adhoc_attrs_del_missing",
"tests/test_request.py::TestRequest_functional::test_gets",
"tests/test_request.py::TestRequest_functional::test_gets_with_query_string",
"tests/test_request.py::TestRequest_functional::test_language_parsing1",
"tests/test_request.py::TestRequest_functional::test_language_parsing2",
"tests/test_request.py::TestRequest_functional::test_language_parsing3",
"tests/test_request.py::TestRequest_functional::test_mime_parsing1",
"tests/test_request.py::TestRequest_functional::test_mime_parsing2",
"tests/test_request.py::TestRequest_functional::test_mime_parsing3",
"tests/test_request.py::TestRequest_functional::test_accept_best_match",
"tests/test_request.py::TestRequest_functional::test_from_mimeparse",
"tests/test_request.py::TestRequest_functional::test_headers",
"tests/test_request.py::TestRequest_functional::test_bad_cookie",
"tests/test_request.py::TestRequest_functional::test_cookie_quoting",
"tests/test_request.py::TestRequest_functional::test_path_quoting",
"tests/test_request.py::TestRequest_functional::test_path_quoting_pct_encodes",
"tests/test_request.py::TestRequest_functional::test_params",
"tests/test_request.py::TestRequest_functional::test_copy_body",
"tests/test_request.py::TestRequest_functional::test_already_consumed_stream",
"tests/test_request.py::TestRequest_functional::test_none_field_name",
"tests/test_request.py::TestRequest_functional::test_broken_seek",
"tests/test_request.py::TestRequest_functional::test_set_body",
"tests/test_request.py::TestRequest_functional::test_broken_clen_header",
"tests/test_request.py::TestRequest_functional::test_nonstr_keys",
"tests/test_request.py::TestRequest_functional::test_authorization",
"tests/test_request.py::TestRequest_functional::test_as_bytes",
"tests/test_request.py::TestRequest_functional::test_as_text",
"tests/test_request.py::TestRequest_functional::test_req_kw_none_val",
"tests/test_request.py::TestRequest_functional::test_env_keys",
"tests/test_request.py::TestRequest_functional::test_repr_nodefault",
"tests/test_request.py::TestRequest_functional::test_request_noenviron_param",
"tests/test_request.py::TestRequest_functional::test_unexpected_kw",
"tests/test_request.py::TestRequest_functional::test_conttype_set_del",
"tests/test_request.py::TestRequest_functional::test_headers2",
"tests/test_request.py::TestRequest_functional::test_host_url",
"tests/test_request.py::TestRequest_functional::test_path_info_p",
"tests/test_request.py::TestRequest_functional::test_urlvars_property",
"tests/test_request.py::TestRequest_functional::test_urlargs_property",
"tests/test_request.py::TestRequest_functional::test_host_property",
"tests/test_request.py::TestRequest_functional::test_body_property",
"tests/test_request.py::TestRequest_functional::test_repr_invalid",
"tests/test_request.py::TestRequest_functional::test_from_garbage_file",
"tests/test_request.py::TestRequest_functional::test_from_file_patch",
"tests/test_request.py::TestRequest_functional::test_from_bytes",
"tests/test_request.py::TestRequest_functional::test_from_text",
"tests/test_request.py::TestRequest_functional::test_blank",
"tests/test_request.py::TestRequest_functional::test_post_does_not_reparse",
"tests/test_request.py::TestRequest_functional::test_middleware_body",
"tests/test_request.py::TestRequest_functional::test_body_file_noseek",
"tests/test_request.py::TestRequest_functional::test_cgi_escaping_fix",
"tests/test_request.py::TestRequest_functional::test_content_type_none",
"tests/test_request.py::TestRequest_functional::test_body_file_seekable",
"tests/test_request.py::TestRequest_functional::test_request_init",
"tests/test_request.py::TestRequest_functional::test_request_query_and_POST_vars",
"tests/test_request.py::TestRequest_functional::test_request_put",
"tests/test_request.py::TestRequest_functional::test_request_patch",
"tests/test_request.py::TestRequest_functional::test_call_WSGI_app",
"tests/test_request.py::TestRequest_functional::test_call_WSGI_app_204",
"tests/test_request.py::TestRequest_functional::test_call_WSGI_app_no_content_type",
"tests/test_request.py::TestRequest_functional::test_get_response_catch_exc_info_true",
"tests/test_request.py::TestFakeCGIBody::test_encode_multipart_value_type_options",
"tests/test_request.py::TestFakeCGIBody::test_encode_multipart_no_boundary",
"tests/test_request.py::TestFakeCGIBody::test_repr",
"tests/test_request.py::TestFakeCGIBody::test_fileno",
"tests/test_request.py::TestFakeCGIBody::test_iter",
"tests/test_request.py::TestFakeCGIBody::test_readline",
"tests/test_request.py::TestFakeCGIBody::test_read_bad_content_type",
"tests/test_request.py::TestFakeCGIBody::test_read_urlencoded",
"tests/test_request.py::TestFakeCGIBody::test_readable",
"tests/test_request.py::Test_cgi_FieldStorage__repr__patch::test_with_file",
"tests/test_request.py::Test_cgi_FieldStorage__repr__patch::test_without_file",
"tests/test_request.py::TestLimitedLengthFile::test_fileno",
"tests/test_request.py::Test_environ_from_url::test_environ_from_url",
"tests/test_request.py::Test_environ_from_url::test_environ_from_url_highorder_path_info",
"tests/test_request.py::Test_environ_from_url::test_fileupload_mime_type_detection",
"tests/test_request.py::TestRequestMultipart::test_multipart_with_charset"
]
| []
| null | 1,423 | [
"CHANGES.txt",
"src/webob/request.py"
]
| [
"CHANGES.txt",
"src/webob/request.py"
]
|
|
choderalab__openmmtools-235 | fe82cd1b81ae2026ad0299430293e27c0128335e | 2017-06-30 21:41:26 | 9e299792b6ae45acb1b5cd6a4033a4a15df1dd75 | diff --git a/openmmtools/cache.py b/openmmtools/cache.py
index f6d3ede..87ec8d6 100644
--- a/openmmtools/cache.py
+++ b/openmmtools/cache.py
@@ -17,7 +17,7 @@ Provide cache classes to handle creation of OpenMM Context objects.
import copy
import collections
-from simtk import openmm
+from simtk import openmm, unit
from openmmtools import integrators
@@ -114,12 +114,26 @@ class LRUCache(object):
@time_to_live.setter
def time_to_live(self, new_time_to_live):
# Update entries only if we are changing the ttl.
- ttl_diff = new_time_to_live - self._ttl
- if ttl_diff == 0:
+ if new_time_to_live == self._ttl:
return
+
+ # Update expiration of cache entries.
for entry in self._data.values():
- entry.expiration += ttl_diff
- self._remove_expired()
+ # If there was no time to live before, just let entries
+ # expire in new_time_to_live accesses
+ if self._ttl is None:
+ entry.expiration = self._n_access + new_time_to_live
+ # If we don't want expiration anymore, delete the field.
+ # This way we save memory in case there are a lot of entries.
+ elif new_time_to_live is None:
+ del entry.expiration
+ # Otherwise just add/subtract the difference.
+ else:
+ entry.expiration += new_time_to_live - self._ttl
+
+ # Purge cache only if there is a time to live.
+ if new_time_to_live is not None:
+ self._remove_expired()
self._ttl = new_time_to_live
def empty(self):
@@ -366,11 +380,18 @@ class ContextCache(object):
thermodynamic_state_id = self._generate_state_id(thermodynamic_state)
matching_context_ids = [context_id for context_id in self._lru
if context_id[0] == thermodynamic_state_id]
- if len(matching_context_ids) > 0:
- context = self._lru[matching_context_ids[0]] # Return first found.
+ if len(matching_context_ids) == 0:
+ # We have to create a new Context.
+ integrator = self._get_default_integrator(thermodynamic_state.temperature)
+ elif len(matching_context_ids) == 1:
+ # Only one match.
+ context = self._lru[matching_context_ids[0]]
else:
- # We have to create a new Context. Use a likely-to-be-used Integrator.
- integrator = integrators.GeodesicBAOABIntegrator(temperature=thermodynamic_state.temperature)
+ # Multiple matches, prefer non-default Integrator.
+ for context_id in matching_context_ids:
+ if context_id[1] != self._default_integrator_id():
+ context = self._lru[context_id]
+ break
if context is None:
# Determine the Context id matching the pair state-integrator.
@@ -447,6 +468,7 @@ class ContextCache(object):
"""
standard_integrator = copy.deepcopy(integrator)
+ integrators.RestorableIntegrator.restore_interface(standard_integrator)
for attribute, std_value in cls._COMPATIBLE_INTEGRATOR_ATTRIBUTES.items():
try:
getattr(standard_integrator, 'set' + attribute)(std_value)
@@ -459,7 +481,13 @@ class ContextCache(object):
"""Return a unique key for the ThermodynamicState."""
# We take advantage of the cached _standard_system_hash property
# to generate a compatible hash for the thermodynamic state.
- return str(thermodynamic_state._standard_system_hash)
+ return thermodynamic_state._standard_system_hash
+
+ @classmethod
+ def _generate_integrator_id(cls, integrator):
+ """Return a unique key for the given Integrator."""
+ standard_integrator = cls._standardize_integrator(integrator)
+ return openmm.XmlSerializer.serialize(standard_integrator).__hash__()
@classmethod
def _generate_context_id(cls, thermodynamic_state, integrator):
@@ -472,10 +500,25 @@ class ContextCache(object):
"""
state_id = cls._generate_state_id(thermodynamic_state)
- standard_integrator = cls._standardize_integrator(integrator)
- integrator_id = openmm.XmlSerializer.serialize(standard_integrator).__hash__()
+ integrator_id = cls._generate_integrator_id(integrator)
return state_id, integrator_id
+ @staticmethod
+ def _get_default_integrator(temperature):
+ """Return a new instance of the default integrator."""
+ # Use a likely-to-be-used Integrator.
+ return integrators.GeodesicBAOABIntegrator(temperature=temperature)
+
+ @classmethod
+ def _default_integrator_id(cls):
+ """Return the unique key of the default integrator."""
+ if cls._cached_default_integrator_id is None:
+ default_integrator = cls._get_default_integrator(300*unit.kelvin)
+ default_integrator_id = cls._generate_integrator_id(default_integrator)
+ cls._cached_default_integrator_id = default_integrator_id
+ return cls._cached_default_integrator_id
+ _cached_default_integrator_id = None
+
# =============================================================================
# DUMMY CONTEXT CACHE
@@ -523,7 +566,7 @@ class DummyContextCache(object):
# GLOBAL CONTEXT CACHE
# =============================================================================
-global_context_cache = ContextCache(capacity=3, time_to_live=50)
+global_context_cache = ContextCache(capacity=None, time_to_live=None)
# =============================================================================
diff --git a/openmmtools/integrators.py b/openmmtools/integrators.py
index ecdb460..ce87c38 100644
--- a/openmmtools/integrators.py
+++ b/openmmtools/integrators.py
@@ -38,6 +38,7 @@ this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import logging
import re
+import zlib
import simtk.unit
@@ -213,9 +214,18 @@ class RestorableIntegrator(mm.CustomIntegrator,PrettyPrintableIntegrator):
@staticmethod
def _compute_class_hash(integrator_class):
- """Return a numeric hash for the integrator class."""
- # We need to convert to float because some digits may be lost in the conversion
- return float(hash(integrator_class.__name__))
+ """Return a numeric hash for the integrator class.
+
+ The hash will become part of the Integrator serialization,
+        so it is important for it to be consistent across processes in case
+ the integrator is sent to a remote worker. The hash() built-in
+ function is seeded by the PYTHONHASHSEED environmental variable,
+ so we can't use it here.
+
+ We also need to convert to float because some digits may be
+ lost in the conversion.
+ """
+ return float(zlib.adler32(integrator_class.__name__.encode()))
class ThermostatedIntegrator(RestorableIntegrator):
| Context cache failure to access `set*`
Unfortunately, I don't have a minimal example yet, but I did want to note that a few days ago, as I was trying to use the `ContextCache` with a `celery` worker, I ran into a strange bug. Using the `AlchemicalNonequilibriumLangevinIntegrator`, whenever the cache tried to recover a context and integrator, I got an exception saying there was no `set*` attribute (including `setTemperature`, etc.).
After some debugging, I noticed that the `AlchemicalNonequilibriumLangevinIntegrator` was becoming a regular `CustomIntegrator` somehow, but I'm not quite sure how or why.
This isn't super high priority, since I have a workaround for now, but since the `ContextCache` API is really awesome and convenient, it would be great to figure out what's going on. | choderalab/openmmtools | diff --git a/openmmtools/tests/test_cache.py b/openmmtools/tests/test_cache.py
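A minimal sketch of the call pattern involved; the test system below is an illustrative assumption (not taken from this report), and `restore_interface` is the helper the fix starts calling when standardizing integrators:

```python
from simtk import unit
from openmmtools import cache, integrators, states, testsystems

# Illustrative assumption: any small test system works for this sketch.
system = testsystems.AlanineDipeptideVacuum().system
state = states.ThermodynamicState(system, temperature=300 * unit.kelvin)

context_cache = cache.ContextCache()
context, integrator = context_cache.get_context(state)

# The integrator handed back by the cache can show up as a bare CustomIntegrator
# (the failure described above), so subclass calls such as setTemperature() raise
# AttributeError. restore_interface() re-attaches the original subclass using the
# class hash stored with the integrator, after which those methods work again.
integrators.RestorableIntegrator.restore_interface(integrator)
integrator.setTemperature(310 * unit.kelvin)
```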
index de9b008..ef8a8ef 100644
--- a/openmmtools/tests/test_cache.py
+++ b/openmmtools/tests/test_cache.py
@@ -124,14 +124,32 @@ def test_lru_cache_capacity_property():
def test_lru_cache_time_to_live_property():
"""Decreasing the time to live updates the expiration of elements."""
- cache = LRUCache(time_to_live=50)
- for i in range(4):
- cache[str(i)] = i
+ def add_4_elements(_cache):
+ for i in range(4):
+ _cache[str(i)] = i
+
+ cache = LRUCache(time_to_live=None)
+ add_4_elements(cache)
assert len(cache) == 4
+
+ # Setting time to live to 1 cause all 4 entries to expire on next access.
cache.time_to_live = 1
- assert len(cache) == 1
+ assert len(cache) == 4
assert cache.time_to_live == 1
- assert '3' in cache
+ cache['4'] = 4
+ assert len(cache) == 1
+ assert '4' in cache
+
+ # Increase time_to_live.
+ cache.time_to_live = 2
+ add_4_elements(cache)
+ assert len(cache) == 2
+ assert '2' in cache and '3' in cache
+
+ # Setting it back to None makes it limitless.
+ cache.time_to_live = None
+ add_4_elements(cache)
+ assert len(cache) == 4
# =============================================================================
@@ -226,19 +244,25 @@ class TestContextCache(object):
cache = ContextCache()
state1, state2 = self.incompatible_states[:2]
- # First we create a Context in state1.
- cache.get_context(state1, copy.deepcopy(self.verlet_2fs))
+ # If there are no previous Contexts, a default integrator is used to create a new one.
+ context, default_integrator = cache.get_context(state1)
assert len(cache) == 1
- # When we don't specify the integrator, it first looks for cached Contexts.
+ # Now we create another Context in state1 with a different integrator.
+ assert type(self.verlet_2fs) is not type(default_integrator) # test precondition
+ cache.get_context(state1, copy.deepcopy(self.verlet_2fs))
+ assert len(cache) == 2
+
+ # When we don't specify the integrator, it first looks
+ # for cached Contexts, and non-default contexts are preferred.
context, integrator = cache.get_context(state1)
- assert len(cache) == 1
+ assert len(cache) == 2
assert state1.is_context_compatible(context)
- assert isinstance(integrator, openmm.VerletIntegrator)
+ assert isinstance(integrator, type(self.verlet_2fs)), type(integrator)
# With an incompatible state, a new Context is created.
cache.get_context(state2)
- assert len(cache) == 2
+ assert len(cache) == 3
def test_cache_capacity_ttl(self):
"""Check that the cache capacity and time_to_live work as expected."""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 0.11 | {
"env_vars": null,
"env_yml_path": [
"docs/environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": true,
"packages": "environment.yml",
"pip_packages": [
"nose",
"pymbar",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
attrs==22.2.0
Babel @ file:///tmp/build/80754af9/babel_1620871417480/work
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
colorama @ file:///tmp/build/80754af9/colorama_1607707115595/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
Cython @ file:///tmp/build/80754af9/cython_1626256602391/work
docutils @ file:///tmp/build/80754af9/docutils_1620827982266/work
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
imagesize @ file:///tmp/build/80754af9/imagesize_1637939814114/work
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig==1.1.1
Jinja2 @ file:///opt/conda/conda-bld/jinja2_1647436528585/work
MarkupSafe @ file:///tmp/build/80754af9/markupsafe_1621528150516/work
nose==1.3.7
numexpr==2.8.1
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
numpydoc @ file:///tmp/build/80754af9/numpydoc_1605117425582/work
OpenMM==7.4.2
-e git+https://github.com/choderalab/openmmtools.git@fe82cd1b81ae2026ad0299430293e27c0128335e#egg=openmmtools
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
ParmEd==3.2.0
pluggy==1.0.0
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
Pygments @ file:///opt/conda/conda-bld/pygments_1644249106324/work
pymbar==4.0.3
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1637937080595/work
Sphinx @ file:///opt/conda/conda-bld/sphinx_1643644169832/work
sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
sphinxcontrib-htmlhelp @ file:///tmp/build/80754af9/sphinxcontrib-htmlhelp_1623945626792/work
sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
sphinxcontrib-serializinghtml @ file:///tmp/build/80754af9/sphinxcontrib-serializinghtml_1624451540180/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: openmmtools
channels:
- omnia
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- alabaster=0.7.12=pyhd3eb1b0_0
- babel=2.9.1=pyhd3eb1b0_0
- blas=1.0=openblas
- brotlipy=0.7.0=py36h27cfd23_1003
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- colorama=0.4.4=pyhd3eb1b0_0
- cryptography=35.0.0=py36hd23ed53_0
- cython=0.29.24=py36h295c915_0
- docutils=0.17.1=py36h06a4308_1
- fftw3f=3.3.4=2
- idna=3.3=pyhd3eb1b0_0
- imagesize=1.3.0=pyhd3eb1b0_0
- importlib-metadata=4.8.1=py36h06a4308_0
- jinja2=3.0.3=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libstdcxx-ng=11.2.0=h1234567_1
- markupsafe=2.0.1=py36h27cfd23_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- numpydoc=1.1.0=pyhd3eb1b0_1
- openmm=7.4.2=py36_cuda101_rc_1
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pandas=1.1.5=py36ha9443f7_0
- parmed=3.2.0=py36_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pygments=2.11.2=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- readline=8.2=h5eee18b_0
- requests=2.27.1=pyhd3eb1b0_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- snowballstemmer=2.2.0=pyhd3eb1b0_0
- sphinx=4.4.0=pyhd3eb1b0_0
- sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0
- sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0
- sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- typing_extensions=4.1.1=pyh06a4308_0
- urllib3=1.26.8=pyhd3eb1b0_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- iniconfig==1.1.1
- nose==1.3.7
- numexpr==2.8.1
- pluggy==1.0.0
- py==1.11.0
- pymbar==4.0.3
- pytest==7.0.1
- tomli==1.2.3
prefix: /opt/conda/envs/openmmtools
| [
"openmmtools/tests/test_cache.py::test_lru_cache_time_to_live_property",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_context_any_integrator"
]
| []
| [
"openmmtools/tests/test_cache.py::test_lru_cache_cache_entry_unpacking",
"openmmtools/tests/test_cache.py::test_lru_cache_maximum_capacity",
"openmmtools/tests/test_cache.py::test_lru_cache_eliminate_least_recently_used",
"openmmtools/tests/test_cache.py::test_lru_cache_access_to_live",
"openmmtools/tests/test_cache.py::test_lru_cache_capacity_property",
"openmmtools/tests/test_cache.py::TestContextCache::test_copy_integrator_state",
"openmmtools/tests/test_cache.py::TestContextCache::test_generate_compatible_context_key",
"openmmtools/tests/test_cache.py::TestContextCache::test_generate_incompatible_context_key",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_compatible_context",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_incompatible_context",
"openmmtools/tests/test_cache.py::TestContextCache::test_cache_capacity_ttl",
"openmmtools/tests/test_cache.py::TestContextCache::test_platform_property"
]
| []
| MIT License | 1,424 | [
"openmmtools/integrators.py",
"openmmtools/cache.py"
]
| [
"openmmtools/integrators.py",
"openmmtools/cache.py"
]
|
|
neogeny__TatSu-35 | 4dcfba04d700e858c2c3ae1fdb258e6d5bbce2ab | 2017-07-01 13:13:51 | 4aa9636ab1a77a24a5b60eeb06575aee5cf20dd7 | diff --git a/docs/left_recursion.rst b/docs/left_recursion.rst
index 615a5d1..8b16e61 100644
--- a/docs/left_recursion.rst
+++ b/docs/left_recursion.rst
@@ -16,3 +16,30 @@ Left recursion support is enabled by default in |TatSu|. To disable it for a par
@@left_recursion :: False
+
+.. warning::
+
+    Not all left-recursive grammars that use the |TatSu| syntax are PEG_. The same is true of right-recursive grammars. **The order of rules matters in PEG**.
+
+ For right-recursive grammars the choices that parse the most input must come first. The same is true for left-recursive grammars.
+
+    Additionally, for grammars with **indirect left recursion, the rules containing choices must be the first invoked during a parse**. The following grammar is correct, but it will not work if the start rule is changed to ```start = mul ;```.
+
+ .. code:: ocaml
+
+ start = expr ;
+
+ expr
+ =
+ mul | identifier
+ ;
+
+ mul
+ =
+ expr '*' identifier
+ ;
+
+ identifier
+ =
+ /\w+/
+ ;
diff --git a/tatsu/contexts.py b/tatsu/contexts.py
index c5dfa78..2a35c4a 100644
--- a/tatsu/contexts.py
+++ b/tatsu/contexts.py
@@ -330,7 +330,6 @@ class ParseContext(object):
prune_dict(cache, lambda k, _: k[0] < cutpos)
prune(self._memos, self._pos)
- prune(self._results, self._pos)
def _memoization(self):
return self.memoize_lookaheads or self._lookahead == 0
@@ -464,7 +463,6 @@ class ParseContext(object):
def _forget(self, key):
self._memos.pop(key, None)
- self._results.pop(key, None)
def _memo_for(self, key):
memo = self._memos.get(key)
| Another left recursion problem
Grammar:
```
identifier = /\w+/ ;
expr = mul | identifier ;
mul = expr '*' identifier ;
```
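For reference, a minimal reproduction sketch of the two parses described below, using the same `tatsu.compile()` / `model.parse(start=...)` calls that appear in the test suite for this change (`GRAMMAR` is assumed to be a string holding the three rules above):

```python
import tatsu

# GRAMMAR is an assumed placeholder holding the three rules above as a string.
model = tatsu.compile(GRAMMAR)

print(model.parse('a * b', start='expr'))  # expected result: ['a', '*', 'b']
print(model.parse('a * b', start='mul'))   # raises tatsu.exceptions.FailedToken
```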
Parsing `a * b` with the start rule `expr` gives the expected result: `['a', '*', 'b']`. But parsing with the start rule `mul` gives the following error:
```
Traceback (most recent call last):
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 203, in parse
result = rule()
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 54, in wrapper
return self._call(ruleinfo)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 513, in _call
result = self._recursive_call(ruleinfo)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 546, in _recursive_call
result = self._invoke_rule(ruleinfo, pos, key)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 580, in _invoke_rule
ruleinfo.impl(self)
File "parser.py", line 96, in _mul_
self._token('*')
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 617, in _token
self._error(token, exclass=FailedToken)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 436, in _error
raise self._make_exception(item, exclass=exclass)
tatsu.exceptions.FailedToken: /proc/self/fd/12(1:1) expecting '*' :
^
mul
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "parser.py", line 122, in <module>
ast = generic_main(main, UnknownParser, name='Unknown')
File "/home/manu/vcs/TatSu/tatsu/util.py", line 335, in generic_main
colorize=args.color
File "parser.py", line 115, in main
return parser.parse(text, startrule, filename=filename, **kwargs)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 211, in parse
raise self._furthest_exception
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 679, in _option
yield
File "parser.py", line 88, in _expr_
self._mul_()
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 54, in wrapper
return self._call(ruleinfo)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 513, in _call
result = self._recursive_call(ruleinfo)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 546, in _recursive_call
result = self._invoke_rule(ruleinfo, pos, key)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 580, in _invoke_rule
ruleinfo.impl(self)
File "parser.py", line 96, in _mul_
self._token('*')
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 617, in _token
self._error(token, exclass=FailedToken)
File "/home/manu/vcs/TatSu/tatsu/contexts.py", line 436, in _error
raise self._make_exception(item, exclass=exclass)
tatsu.exceptions.FailedToken: /proc/self/fd/12(1:1) expecting '*' :
^
mul
expr
mul
``` | neogeny/TatSu | diff --git a/test/grammar/left_recursion_test.py b/test/grammar/left_recursion_test.py
index 0dced21..1290c3e 100644
--- a/test/grammar/left_recursion_test.py
+++ b/test/grammar/left_recursion_test.py
@@ -309,3 +309,46 @@ class LeftRecursionTests(unittest.TestCase):
ast = model.parse('foo, bar', trace=trace, colorize=True)
self.assertEqual(['foo', ',', 'bar'], ast)
+
+ def test_change_start_rule(self, trace=False):
+ grammar = '''
+ start = expr ;
+
+ expr
+ =
+ mul | identifier
+ ;
+
+ mul
+ =
+ expr '*' identifier
+ ;
+
+ identifier
+ =
+ /\w+/
+ ;
+ '''
+ model = compile(grammar)
+
+ ast = model.parse('a * b', start='expr', trace=trace, colorize=True)
+ self.assertEqual(['a', '*', 'b'], ast)
+
+ try:
+ model.parse('a * b', start='mul', trace=trace, colorize=True)
+            self.fail('failure expected as first recursive rule does not contain a choice')
+ except FailedParse:
+ pass
+
+ def test_with_gather(self, trace=False):
+ grammar = '''
+ identifier = /\w+/ ;
+ expr = mul | tmp ;
+ mul = expr '*' tmp ;
+ tmp = call | identifier ;
+ call = identifier '(' ','.{expr} ')' ;
+ '''
+ model = compile(grammar)
+
+ ast = model.parse('a(b, c)', start='expr', trace=trace, colorize=True)
+ self.assertEqual(['a', '(', ['b', 'c'], ')'], ast)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-flake8",
"pytest-mypy",
"pytest-pylint"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
dill==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
iniconfig==2.1.0
isort==6.0.1
mccabe==0.7.0
mypy==1.15.0
mypy-extensions==1.0.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pylint==3.3.6
pytest==8.3.5
pytest-flake8==1.3.0
pytest-mypy==1.0.0
pytest-pylint==0.21.0
-e git+https://github.com/neogeny/TatSu.git@4dcfba04d700e858c2c3ae1fdb258e6d5bbce2ab#egg=TatSu
tomli==2.2.1
tomlkit==0.13.2
typing_extensions==4.13.0
| name: TatSu
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- dill==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pylint==3.3.6
- pytest==8.3.5
- pytest-flake8==1.3.0
- pytest-mypy==1.0.0
- pytest-pylint==0.21.0
- tomli==2.2.1
- tomlkit==0.13.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/TatSu
| [
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_with_gather"
]
| []
| [
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_change_start_rule",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_direct_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_dropped_input_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_indirect_left_recursion_with_cut",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_bug",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_left_recursion_with_right_associativity",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_nested_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_no_left_recursion",
"test/grammar/left_recursion_test.py::LeftRecursionTests::test_partial_input_bug"
]
| []
| BSD License | 1,425 | [
"docs/left_recursion.rst",
"tatsu/contexts.py"
]
| [
"docs/left_recursion.rst",
"tatsu/contexts.py"
]
|
|
aio-libs__aiosmtpd-119 | b87538bc1fc0137b5d188db938c9b386c71683a3 | 2017-07-03 14:33:46 | b87538bc1fc0137b5d188db938c9b386c71683a3 | diff --git a/aiosmtpd/docs/NEWS.rst b/aiosmtpd/docs/NEWS.rst
index dc6631a..7ade980 100644
--- a/aiosmtpd/docs/NEWS.rst
+++ b/aiosmtpd/docs/NEWS.rst
@@ -21,6 +21,10 @@
* Widen the catch of ``ConnectionResetError`` and ``CancelledError`` to also
catch such errors from handler methods. (Closes #110)
* Added a manpage for the ``aiosmtpd`` command line script. (Closes #116)
+* Added much better support for the ``HELP`` command. There's a new decorator called
+ ``@syntax()`` which you can use in derived classes to decorate ``smtp_*()``
+ methods. These then show up in ``HELP`` responses. This also fixes
+ ``HELP`` responses for the ``LMTP`` subclass. (Closes #113)
1.0 (2017-05-15)
================
diff --git a/aiosmtpd/docs/smtp.rst b/aiosmtpd/docs/smtp.rst
index f46a65e..de2aebc 100644
--- a/aiosmtpd/docs/smtp.rst
+++ b/aiosmtpd/docs/smtp.rst
@@ -28,8 +28,9 @@ All methods implementing ``SMTP`` commands are prefixed with ``smtp_``; they
must also be coroutines. Here's how you could implement this use case::
>>> import asyncio
- >>> from aiosmtpd.smtp import SMTP as Server
+ >>> from aiosmtpd.smtp import SMTP as Server, syntax
>>> class MyServer(Server):
+ ... @syntax('PING [ignored]')
... async def smtp_PING(self, arg):
... await self.push('259 Pong')
@@ -61,6 +62,17 @@ command, we have to use the lower level interface to talk to it.
>>> message
b'Pong'
+Because we prefixed the ``smtp_PING()`` method with the ``@syntax()``
+decorator, the command shows up in the ``HELP`` output.
+
+ >>> print(client.help().decode('utf-8'))
+ Supported commands: DATA EHLO HELO HELP MAIL NOOP PING QUIT RCPT RSET VRFY
+
+And we can get more detailed help on the new command.
+
+ >>> print(client.help('PING').decode('utf-8'))
+ Syntax: PING [ignored]
+
Server hooks
============
diff --git a/aiosmtpd/lmtp.py b/aiosmtpd/lmtp.py
index 65b5fd3..53ed087 100644
--- a/aiosmtpd/lmtp.py
+++ b/aiosmtpd/lmtp.py
@@ -1,12 +1,14 @@
-from aiosmtpd.smtp import SMTP
+from aiosmtpd.smtp import SMTP, syntax
from public import public
@public
class LMTP(SMTP):
+ @syntax('LHLO hostname')
async def smtp_LHLO(self, arg):
"""The LMTP greeting, used instead of HELO/EHLO."""
await super().smtp_HELO(arg)
+ self.show_smtp_greeting = False
async def smtp_HELO(self, arg):
"""HELO is not a valid LMTP command."""
diff --git a/aiosmtpd/smtp.py b/aiosmtpd/smtp.py
index dfdbcb5..780d11a 100644
--- a/aiosmtpd/smtp.py
+++ b/aiosmtpd/smtp.py
@@ -51,6 +51,15 @@ def make_loop():
return asyncio.get_event_loop()
+def syntax(text, extended=None, when=None):
+ def decorator(f):
+ f.__smtp_syntax__ = text
+ f.__smtp_syntax_extended__ = extended
+ f.__smtp_syntax_when__ = when
+ return f
+ return decorator
+
+
@public
class SMTP(asyncio.StreamReaderProtocol):
command_size_limit = 512
@@ -281,6 +290,7 @@ class SMTP(asyncio.StreamReaderProtocol):
await self.push(status)
# SMTP and ESMTP commands
+ @syntax('HELO hostname')
async def smtp_HELO(self, hostname):
if not hostname:
await self.push('501 Syntax: HELO hostname')
@@ -293,6 +303,7 @@ class SMTP(asyncio.StreamReaderProtocol):
status = '250 {}'.format(self.hostname)
await self.push(status)
+ @syntax('EHLO hostname')
async def smtp_EHLO(self, hostname):
if not hostname:
await self.push('501 Syntax: EHLO hostname')
@@ -320,10 +331,12 @@ class SMTP(asyncio.StreamReaderProtocol):
status = '250 HELP'
await self.push(status)
+ @syntax('NOOP [ignored]')
async def smtp_NOOP(self, arg):
status = await self._call_handler_hook('NOOP', arg)
await self.push('250 OK' if status is MISSING else status)
+ @syntax('QUIT')
async def smtp_QUIT(self, arg):
if arg:
await self.push('501 Syntax: QUIT')
@@ -333,6 +346,7 @@ class SMTP(asyncio.StreamReaderProtocol):
self._handler_coroutine.cancel()
self.transport.close()
+ @syntax('STARTTLS', when='tls_context')
async def smtp_STARTTLS(self, arg):
log.info('%r STARTTLS', self.session.peer)
if arg:
@@ -390,43 +404,39 @@ class SMTP(asyncio.StreamReaderProtocol):
result[param] = value if eq else True
return result
+ def _syntax_available(self, method):
+ if getattr(method, '__smtp_syntax__', None) is None:
+ return False
+ if method.__smtp_syntax_when__:
+ return bool(getattr(self, method.__smtp_syntax_when__))
+ return True
+
+ @syntax('HELP [command]')
+ @asyncio.coroutine
async def smtp_HELP(self, arg):
+ code = 250
if arg:
- extended = ' [SP <mail-parameters>]'
- lc_arg = arg.upper()
- if lc_arg == 'EHLO':
- await self.push('250 Syntax: EHLO hostname')
- elif lc_arg == 'HELO':
- await self.push('250 Syntax: HELO hostname')
- elif lc_arg == 'MAIL':
- msg = '250 Syntax: MAIL FROM: <address>'
- if self.session.extended_smtp:
- msg += extended
- await self.push(msg)
- elif lc_arg == 'RCPT':
- msg = '250 Syntax: RCPT TO: <address>'
- if self.session.extended_smtp:
- msg += extended
- await self.push(msg)
- elif lc_arg == 'DATA':
- await self.push('250 Syntax: DATA')
- elif lc_arg == 'RSET':
- await self.push('250 Syntax: RSET')
- elif lc_arg == 'NOOP':
- await self.push('250 Syntax: NOOP')
- elif lc_arg == 'QUIT':
- await self.push('250 Syntax: QUIT')
- elif lc_arg == 'VRFY':
- await self.push('250 Syntax: VRFY <address>')
- else:
- await self.push(
- '501 Supported commands: EHLO HELO MAIL RCPT '
- 'DATA RSET NOOP QUIT VRFY')
- else:
- await self.push(
- '250 Supported commands: EHLO HELO MAIL RCPT DATA '
- 'RSET NOOP QUIT VRFY')
-
+ method = getattr(self, 'smtp_' + arg.upper(), None)
+ if method and self._syntax_available(method):
+ help_str = method.__smtp_syntax__
+ if (self.session.extended_smtp
+ and method.__smtp_syntax_extended__):
+ help_str += method.__smtp_syntax_extended__
+ await self.push('250 Syntax: ' + help_str)
+ return
+ code = 501
+ commands = []
+ for name in dir(self):
+ if not name.startswith('smtp_'):
+ continue
+ method = getattr(self, name)
+ if self._syntax_available(method):
+ commands.append(name.lstrip('smtp_'))
+ commands.sort()
+ await self.push(
+ '{} Supported commands: {}'.format(code, ' '.join(commands)))
+
+ @syntax('VRFY <address>')
async def smtp_VRFY(self, arg):
if arg:
try:
@@ -444,6 +454,7 @@ class SMTP(asyncio.StreamReaderProtocol):
else:
await self.push('501 Syntax: VRFY <address>')
+ @syntax('MAIL FROM: <address>', extended=' [SP <mail-parameters>]')
async def smtp_MAIL(self, arg):
if not self.session.host_name:
await self.push('503 Error: send HELO first')
@@ -510,6 +521,7 @@ class SMTP(asyncio.StreamReaderProtocol):
log.info('%r sender: %s', self.session.peer, address)
await self.push(status)
+ @syntax('RCPT TO: <address>', extended=' [SP <mail-parameters>]')
async def smtp_RCPT(self, arg):
if not self.session.host_name:
await self.push('503 Error: send HELO first')
@@ -556,6 +568,7 @@ class SMTP(asyncio.StreamReaderProtocol):
log.info('%r recip: %s', self.session.peer, address)
await self.push(status)
+ @syntax('RSET')
async def smtp_RSET(self, arg):
if arg:
await self.push('501 Syntax: RSET')
@@ -568,6 +581,7 @@ class SMTP(asyncio.StreamReaderProtocol):
status = await self._call_handler_hook('RSET')
await self.push('250 OK' if status is MISSING else status)
+ @syntax('DATA')
async def smtp_DATA(self, arg):
if not self.session.host_name:
await self.push('503 Error: send HELO first')
| LMTP server offers HELO, doesn't provide correct LHLO response
As initially reported here: https://gitlab.com/mailman/mailman/issues/348
Telnet to the LMTP server, and type "HELP".
It responds with
250 Supported commands: EHLO HELO MAIL RCPT DATA RSET NOOP QUIT VRFY
This isn't correct. LMTP uses LHLO; the EHLO and HELO should not be offered.
LHLO foo
Responds with
250 foo.example.net
RFC2033 says the implementation MUST support PIPELINING and ENHANCEDSTATUSCODES, and SHOULD support 8BITMIME
Thus, LHLO should be responding with the ESMTP keywords for these extensions.
8BITMIME (RFC 1652)
ENHANCEDSTATUSCODES (RFC 2034)
PIPELINING (RFC 1854)
Since it also responds to HELP, HELP should also be listed (RFC 1869).
Thus, the dialog should look like:
````
S: 220 foo.example.net GNU Mailman LMTP ...
C: LHLO foo
S: 250-foo.example.net
S: 250-HELP
S: 250-ENHANCEDSTATUSCODES
S: 250-8BITMIME
S: 250 PIPELINING
````
(The order doesn't matter.)
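For illustration, a client-side check of this dialog might look like the following sketch, using the standard library's `smtplib.LMTP` client (the host and port here are placeholders for wherever the LMTP runner is listening):

```python
from smtplib import LMTP

# Placeholder host/port for the LMTP server under test.
with LMTP('localhost', 8024) as client:
    code, reply = client.docmd('LHLO', 'foo')
    print(code, reply)   # expect 250 plus the extension keywords listed above
    code, reply = client.docmd('HELP')
    print(code, reply)   # the command list should include LHLO, not HELO/EHLO
```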
And, obviously, 'HELP' should respond with the correct list of supported commands - including LHLO.
| aio-libs/aiosmtpd | diff --git a/aiosmtpd/tests/test_lmtp.py b/aiosmtpd/tests/test_lmtp.py
index 0af6594..d77816e 100644
--- a/aiosmtpd/tests/test_lmtp.py
+++ b/aiosmtpd/tests/test_lmtp.py
@@ -40,3 +40,13 @@ class TestLMTP(unittest.TestCase):
code, response = client.ehlo('example.com')
self.assertEqual(code, 500)
self.assertEqual(response, b'Error: command "EHLO" not recognized')
+
+ def test_help(self):
+ # https://github.com/aio-libs/aiosmtpd/issues/113
+ with SMTP(*self.address) as client:
+ # Don't get tricked by smtplib processing of the response.
+ code, response = client.docmd('HELP')
+ self.assertEqual(code, 250)
+ self.assertEqual(response,
+ b'Supported commands: DATA HELP LHLO MAIL '
+ b'NOOP QUIT RCPT RSET VRFY')
diff --git a/aiosmtpd/tests/test_smtp.py b/aiosmtpd/tests/test_smtp.py
index ab52cf6..8d2a07d 100644
--- a/aiosmtpd/tests/test_smtp.py
+++ b/aiosmtpd/tests/test_smtp.py
@@ -287,8 +287,8 @@ class TestSMTP(unittest.TestCase):
code, response = client.docmd('HELP')
self.assertEqual(code, 250)
self.assertEqual(response,
- b'Supported commands: EHLO HELO MAIL RCPT '
- b'DATA RSET NOOP QUIT VRFY')
+ b'Supported commands: DATA EHLO HELO HELP MAIL '
+ b'NOOP QUIT RCPT RSET VRFY')
def test_help_helo(self):
with SMTP(*self.address) as client:
@@ -354,7 +354,7 @@ class TestSMTP(unittest.TestCase):
with SMTP(*self.address) as client:
code, response = client.docmd('HELP', 'NOOP')
self.assertEqual(code, 250)
- self.assertEqual(response, b'Syntax: NOOP')
+ self.assertEqual(response, b'Syntax: NOOP [ignored]')
def test_help_quit(self):
with SMTP(*self.address) as client:
@@ -374,8 +374,8 @@ class TestSMTP(unittest.TestCase):
code, response = client.docmd('HELP me!')
self.assertEqual(code, 501)
self.assertEqual(response,
- b'Supported commands: EHLO HELO MAIL RCPT '
- b'DATA RSET NOOP QUIT VRFY')
+ b'Supported commands: DATA EHLO HELO HELP MAIL '
+ b'NOOP QUIT RCPT RSET VRFY')
def test_expn(self):
with SMTP(*self.address) as client:
diff --git a/aiosmtpd/tests/test_starttls.py b/aiosmtpd/tests/test_starttls.py
index e8608c4..f7c1fde 100644
--- a/aiosmtpd/tests/test_starttls.py
+++ b/aiosmtpd/tests/test_starttls.py
@@ -103,6 +103,18 @@ class TestStartTLS(unittest.TestCase):
code, response = client.docmd('STARTTLS', 'TRUE')
self.assertEqual(code, 501)
+ def test_help_after_starttls(self):
+ controller = TLSController(Sink())
+ controller.start()
+ self.addCleanup(controller.stop)
+ with SMTP(controller.hostname, controller.port) as client:
+ # Don't get tricked by smtplib processing of the response.
+ code, response = client.docmd('HELP')
+ self.assertEqual(code, 250)
+ self.assertEqual(response,
+ b'Supported commands: DATA EHLO HELO HELP MAIL '
+ b'NOOP QUIT RCPT RSET STARTTLS VRFY')
+
class TestTLSForgetsSessionData(unittest.TestCase):
def setUp(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 4
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/aio-libs/aiosmtpd.git@b87538bc1fc0137b5d188db938c9b386c71683a3#egg=aiosmtpd
atpublic==5.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: aiosmtpd
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- atpublic==5.1
prefix: /opt/conda/envs/aiosmtpd
| [
"aiosmtpd/tests/test_lmtp.py::TestLMTP::test_help",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_bad_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_noop",
"aiosmtpd/tests/test_starttls.py::TestStartTLS::test_help_after_starttls"
]
| []
| [
"aiosmtpd/tests/test_lmtp.py::TestLMTP::test_ehlo",
"aiosmtpd/tests/test_lmtp.py::TestLMTP::test_helo",
"aiosmtpd/tests/test_lmtp.py::TestLMTP::test_lhlo",
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_empty_email",
"aiosmtpd/tests/test_smtp.py::TestProtocol::test_honors_mail_delimeters",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_invalid_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_data_no_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_ehlo_then_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_empty_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_expn",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_duplicate",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_no_hostname",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_helo_then_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_data",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_ehlo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_mail_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rcpt_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_help_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_malformed",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_from_twice",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_malformed_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_missing_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_from",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_bad_syntax_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_mail_unrecognized_params_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_noop_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_quit_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_fail_parse_email",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_address",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_arg_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_helo",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_mail",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_no_to",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_bad_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_params_no_esmtp",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rcpt_with_unknown_params",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_rset_with_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_too_long_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_unknown_command",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_no_arg",
"aiosmtpd/tests/test_smtp.py::TestSMTP::test_vrfy_not_an_address",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_helo",
"aiosmtpd/tests/test_smtp.py::TestResetCommands::test_rset",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_bad_encodings",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_dots_escaped",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_esmtp_no_size_limit",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_exception",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_exception_handler_undescribable",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_invalid_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_compatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_incompatible_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_size_too_large",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_mail_with_unrequited_smtputf8",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_process_message_error",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_too_long_message_body",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_custom_response",
"aiosmtpd/tests/test_smtp.py::TestSMTPWithController::test_unexpected_errors_unhandled",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_custom_hostname",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_default_greeting",
"aiosmtpd/tests/test_smtp.py::TestCustomizations::test_mail_invalid_body_param",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_command",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_close_in_data",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_DATA",
"aiosmtpd/tests/test_smtp.py::TestClientCrash::test_connection_reset_during_command",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_bad_encoded_param",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_data",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_ehlo",
"aiosmtpd/tests/test_smtp.py::TestStrictASCII::test_mail_param",
"aiosmtpd/tests/test_smtp.py::TestSleepingHandler::test_close_after_helo",
"aiosmtpd/tests/test_starttls.py::TestStartTLS::test_disabled_tls",
"aiosmtpd/tests/test_starttls.py::TestStartTLS::test_failed_handshake",
"aiosmtpd/tests/test_starttls.py::TestStartTLS::test_starttls",
"aiosmtpd/tests/test_starttls.py::TestStartTLS::test_tls_bad_syntax",
"aiosmtpd/tests/test_starttls.py::TestTLSForgetsSessionData::test_forget_ehlo",
"aiosmtpd/tests/test_starttls.py::TestTLSForgetsSessionData::test_forget_mail",
"aiosmtpd/tests/test_starttls.py::TestTLSForgetsSessionData::test_forget_rcpt",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_data_fails",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_ehlo",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_hello_fails",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_help_fails",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_mail_fails",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_rcpt_fails",
"aiosmtpd/tests/test_starttls.py::TestRequireTLS::test_vrfy_fails"
]
| []
| Apache License 2.0 | 1,426 | [
"aiosmtpd/smtp.py",
"aiosmtpd/docs/NEWS.rst",
"aiosmtpd/lmtp.py",
"aiosmtpd/docs/smtp.rst"
]
| [
"aiosmtpd/smtp.py",
"aiosmtpd/docs/NEWS.rst",
"aiosmtpd/lmtp.py",
"aiosmtpd/docs/smtp.rst"
]
|
|
vertexproject__synapse-294 | 82966be7ae8b614ae15eb42b3bf7b8306e5604bf | 2017-07-03 14:51:10 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/cores/common.py b/synapse/cores/common.py
index 29a17931e..69634180c 100644
--- a/synapse/cores/common.py
+++ b/synapse/cores/common.py
@@ -2036,7 +2036,7 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
self.delTufoDset(tufo, name)
if self.caching:
- for prop, valu in tufo[1].items():
+ for prop, valu in list(tufo[1].items()):
self._bumpTufoCache(tufo, prop, valu, None)
iden = tufo[0]
| delTufo dictionary changed during iteration
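For context, the error below is the stock CPython behaviour when a dict is mutated while it is being iterated; here, presumably, the cache bump ends up modifying the same props dict the loop is walking. A minimal illustration of the pitfall, and of the `list(...)` snapshot used in the fix above:

```python
d = {'a': 1, 'b': 2}
try:
    for key, valu in d.items():
        d.pop(key)                    # mutating the dict mid-iteration
except RuntimeError as e:
    print(e)                          # dictionary changed size during iteration

for key, valu in list(d.items()):     # iterating over a snapshot is safe
    d.pop(key)
```

The cli session that surfaced it: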
```
cli> ask inet:fqdn=woowoowoowoo.com delnode(force=1)
oplog:
lift (took:0) {'took': 0, 'sub': 0, 'add': 1, 'mnem': 'lift'}
delnode (took:1) {'excinfo': {'errfile': '/home/autouser/git/synapse/synapse/cores/common.py', 'errline': 2037, 'err': 'RuntimeError', 'errmsg': 'dictionary changed size during iteration'}, 'took': 1, 'sub': 1, 'add': 0, 'mnem': 'delnode'}
options:
limit = None
uniq = 1
limits:
lift = None
time = None
touch = None
(0 results)
``` | vertexproject/synapse | diff --git a/synapse/tests/test_cortex.py b/synapse/tests/test_cortex.py
index 24707a38d..4e1eb7a64 100644
--- a/synapse/tests/test_cortex.py
+++ b/synapse/tests/test_cortex.py
@@ -1143,12 +1143,17 @@ class CortexTest(SynTest):
tufs1 = core.getTufosByProp('foo:qwer', valu=10)
tufs2 = core.getTufosByProp('foo:qwer', valu=11)
+ # Ensure we have cached the tufos we're deleting.
+ self.nn(core.cache_byiden.get(tufo0[0]))
+ self.nn(core.cache_byiden.get(tufo1[0]))
+
self.eq(len(tufs0), 2)
self.eq(len(tufs1), 2)
self.eq(len(tufs2), 0)
+ # Delete an uncached object - here the tufo contents was cached
+ # during lifts but the object itself is a different tuple id()
core.delTufo(tufo0)
- #tufo2 = core.formTufoByProp('foo','lol', qwer=10)
tufs0 = core.getTufosByProp('foo:qwer')
tufs1 = core.getTufosByProp('foo:qwer', valu=10)
@@ -1158,6 +1163,11 @@ class CortexTest(SynTest):
self.eq(len(tufs1), 1)
self.eq(len(tufs2), 0)
+ # Delete an object which was actually cached during lift
+ core.delTufo(tufs0[0])
+ tufs0 = core.getTufosByProp('foo:qwer')
+ self.eq(len(tufs0), 0)
+
def test_cortex_caching_atlimit(self):
with s_cortex.openurl('ram://') as core:
diff --git a/synapse/tests/test_lib_storm.py b/synapse/tests/test_lib_storm.py
index 0c146c1c0..e4236c6b4 100644
--- a/synapse/tests/test_lib_storm.py
+++ b/synapse/tests/test_lib_storm.py
@@ -439,6 +439,15 @@ class StormTest(SynTest):
core.eval('inet:ipv4=1.2.3.4 delnode(force=1)')
self.none(core.getTufoByProp('inet:ipv4', 0x01020304))
+ def test_storm_delnode_caching(self):
+ with s_cortex.openurl('ram:///') as core:
+ core.setConfOpt('caching', True)
+ node = core.formTufoByProp('inet:ipv4', 0x01020304)
+ core.eval('inet:ipv4=1.2.3.4 delnode()')
+ self.nn(core.getTufoByProp('inet:ipv4', 0x01020304))
+ core.eval('inet:ipv4=1.2.3.4 delnode(force=1)')
+ self.none(core.getTufoByProp('inet:ipv4', 0x01020304))
+
def test_storm_editmode(self):
with s_cortex.openurl('ram:///') as core:
node = core.formTufoByProp('inet:ipv4', 0x01020304)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
cov-core==1.15.0
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
nose-cov==1.6
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@82966be7ae8b614ae15eb42b3bf7b8306e5604bf#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- cov-core==1.15.0
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- nose-cov==1.6
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_del_tufo",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode_caching"
]
| []
| [
"synapse/tests/test_cortex.py::CortexTest::test_cortex_addmodel",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_by_type",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_bytype",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_add_tufo",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_atlimit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_new",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_oneref",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_under_limit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_choptag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_comp",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_dict",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_enforce",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_events",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_fire_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_getbytag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ingest",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_isnew",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_lmdb",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_local",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax_epoch",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlrevs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlvers",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration_persistent",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_norm_fail",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_notguidform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ram",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramhost",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramtyperange",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_reqstor",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_savefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seed",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seq",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicepump",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices_errs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_sqlite3",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_stats",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tag_ival",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tagform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tlib_persistence",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_by_default",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_del",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_list",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_pop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprops",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_tag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_xact_deadlock",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_alltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_cmpr_norm",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_deltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_edit_end",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_editmode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_filt_regex",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lift",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lifts_by",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_pivot",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_refs",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_setprop",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_show_help",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_fromtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_glob",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_ival",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_jointag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_query",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_totag",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior_negatives",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_default"
]
| []
| Apache License 2.0 | 1,427 | [
"synapse/cores/common.py"
]
| [
"synapse/cores/common.py"
]
|
|
openmrslab__suspect-64 | e63218387349081ee3d790ee96d1cb8e0ec74af2 | 2017-07-03 15:00:52 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | coveralls:
https://coveralls.io/builds/12233612
Coverage increased (+0.8%) to 78.486% when pulling **c1e331e468074797918589bae34635e5166f596b on 63_array_transforms** into **e63218387349081ee3d790ee96d1cb8e0ec74af2 on master**.
| diff --git a/suspect/_transforms.py b/suspect/_transforms.py
index caddbfe..ee2f9f2 100644
--- a/suspect/_transforms.py
+++ b/suspect/_transforms.py
@@ -68,3 +68,37 @@ def rotation_matrix(angle, axis):
matrix[2, 1] = axis[2] * axis[1] * (1 - c) + axis[0] * s
matrix[2, 2] = c + axis[2] ** 2 * (1 - c)
return matrix
+
+
+def normalise_positions_for_transform(*args):
+ """
+ Takes an input set of arguments which should represent some (x, y, z)
+ coords to be transformed and makes sure they are in a numpy.ndarray,
+ and adds a w dimension of magnitude 1.
+
+ The two acceptable forms of input arguments are a single array_like
+ with final dimension of 3, or three floating point arguments representing
+ x, y and z. In the first case the returned array will have the same shape
+ for all axes except the last, which will go from size 3 to 4, while in the
+ second case the returned array will be of shape (4,).
+
+ Parameters
+ ----------
+ args : array_like or 3 separate floats
+ The arguments to be processed
+
+ Returns
+ -------
+ numpy.ndarray
+ Points ready for transformation by a matrix
+ """
+ if len(args) == 3:
+ positions = [*args, 1]
+ elif len(args) == 1:
+ positions = numpy.atleast_2d(args[0])
+ w_array = numpy.expand_dims(numpy.ones(positions.shape[:-1]), axis=-1)
+ positions = numpy.append(positions, w_array, axis=-1)
+ else:
+ raise ValueError("Unrecognised form for input args")
+
+ return positions
diff --git a/suspect/base.py b/suspect/base.py
index 02025a5..a7eeb60 100644
--- a/suspect/base.py
+++ b/suspect/base.py
@@ -1,5 +1,7 @@
import numpy
+from . import _transforms
+
class ImageBase(numpy.ndarray):
"""
@@ -15,10 +17,14 @@ class ImageBase(numpy.ndarray):
def __array_finalize__(self, obj):
self.transform = getattr(obj, 'transform', None)
- def to_scanner(self, x, y, z):
+ def to_scanner(self, *args):
"""
Converts a 3d position in ImageBase space to the scanner
- reference space. Raises a ValueError if no transform is set.
+ reference space. Argument can either be 3 individual floats
+ for x, y and z or a numpy array_like with final dimension of
+ size 3.
+
+ Raises a ValueError if no transform is set.
Parameters
----------
@@ -37,14 +43,20 @@ class ImageBase(numpy.ndarray):
if self.transform is None:
raise ValueError("No transform set for {} object {}".format(type(self), self))
- transformed_point = self.transform * numpy.matrix([x, y, z, 1]).T
+ positions = _transforms.normalise_positions_for_transform(*args)
+
+ transformed_point = numpy.einsum("ij,...j", self.transform, positions)
- return numpy.squeeze(numpy.asarray(transformed_point))[0:3]
+ return numpy.squeeze(numpy.asarray(transformed_point))[..., 0:3]
- def from_scanner(self, x, y, z):
+ def from_scanner(self, *args):
"""
Converts a 3d position in scanner space to the ImageBase
- reference space. Raises a ValueError if no transform is set.
+ reference space. Argument can either be 3 individual floats
+ for x, y and z or a numpy array_like with final dimension of
+ size 3.
+
+ Raises a ValueError if no transform is set.
Parameters
----------
@@ -63,9 +75,13 @@ class ImageBase(numpy.ndarray):
if self.transform is None:
raise ValueError("No transform set for {} object {}".format(type(self), self))
- transformed_point = numpy.linalg.inv(self.transform) * numpy.matrix([x, y, z, 1]).T
+ positions = _transforms.normalise_positions_for_transform(*args)
+
+ transformed_point = numpy.einsum("ij,...j",
+ numpy.linalg.inv(self.transform),
+ positions)
- return numpy.squeeze(numpy.asarray(transformed_point))[0:3]
+ return numpy.squeeze(numpy.asarray(transformed_point))[..., 0:3]
@property
def voxel_size(self):
| ENH: to/from_scanner() should accept arrays of coords
Currently the to_scanner() and from_scanner() functions take three arguments: x, y, z and return a numpy array. It would be convenient if the functions could handle a numpy array as an input for consistency; this would also allow passing in an array of coords to be transformed all at once, for example from meshgrid(). On the other hand, in the case where the user wants to pass in simple known x, y and z values, this requires them to be wrapped in a tuple, being called like to_scanner((x, y, z)), similarly to the numpy.ones() function. This could be addressed by permitting either 1 or 3 *args and interpreting either as an ndarray of shape [...3] or 3 floats. | openmrslab/suspect | diff --git a/tests/test_mrs/test_base.py b/tests/test_mrs/test_base.py
index 58cee68..9d35987 100644
--- a/tests/test_mrs/test_base.py
+++ b/tests/test_mrs/test_base.py
@@ -1,6 +1,8 @@
import suspect
import numpy
+import pytest
import suspect.base
+from suspect import _transforms
def test_create_base():
@@ -12,9 +14,29 @@ def test_create_base():
def test_base_transform():
- position = [10, 20, 30]
- voxel_size = [20, 20, 20]
+ position = numpy.array([10, 20, 30])
+ voxel_size = numpy.array([20, 20, 20])
transform = suspect.transformation_matrix([1, 0, 0], [0, 1, 0], position, voxel_size)
base = suspect.base.ImageBase(numpy.zeros(1), transform)
numpy.testing.assert_equal(base.position, position)
numpy.testing.assert_equal(base.voxel_size, voxel_size)
+ transformed = base.to_scanner(0, 0, 0)
+ numpy.testing.assert_equal(transformed, position)
+ transformed = base.to_scanner(numpy.array([[0, 0, 0], [1, 1, 1]]))
+ numpy.testing.assert_equal(transformed, [position, position + voxel_size])
+ transformed = base.from_scanner(position)
+ numpy.testing.assert_equal((0, 0, 0), transformed)
+
+
+def test_transforms_fail():
+ base = suspect.base.ImageBase(numpy.zeros(1))
+ with pytest.raises(ValueError):
+ base.to_scanner(0, 0, 0)
+ with pytest.raises(ValueError):
+ base.from_scanner(0, 0, 0)
+ with pytest.raises(ValueError):
+ pos = base.position
+ with pytest.raises(ValueError):
+ vox = base.voxel_size
+ with pytest.raises(ValueError):
+ _transforms.normalise_positions_for_transform(0, 0)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": null,
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asteval==0.9.26
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
charset-normalizer==2.0.12
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
lmfit==1.0.3
MarkupSafe==2.0.1
mistune==0.8.4
mock==5.2.0
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parse==1.20.2
Parsley==1.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pydicom==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.1.1
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/openmrslab/suspect.git@e63218387349081ee3d790ee96d1cb8e0ec74af2#egg=suspect
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
uncertainties==3.1.7
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asteval==0.9.26
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- charset-normalizer==2.0.12
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- lmfit==1.0.3
- markupsafe==2.0.1
- mistune==0.8.4
- mock==5.2.0
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parse==1.20.2
- parsley==1.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pydicom==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.1.1
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- uncertainties==3.1.7
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_base.py::test_base_transform",
"tests/test_mrs/test_base.py::test_transforms_fail"
]
| []
| [
"tests/test_mrs/test_base.py::test_create_base"
]
| []
| MIT License | 1,428 | [
"suspect/base.py",
"suspect/_transforms.py"
]
| [
"suspect/base.py",
"suspect/_transforms.py"
]
|
vertexproject__synapse-295 | 82966be7ae8b614ae15eb42b3bf7b8306e5604bf | 2017-07-03 18:06:45 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/lib/cache.py b/synapse/lib/cache.py
index 788dc81b5..6c3487c41 100644
--- a/synapse/lib/cache.py
+++ b/synapse/lib/cache.py
@@ -428,5 +428,15 @@ class RefDict:
with self.lock:
return [self._pop(k) for k in keys]
+ def clear(self):
+ with self.lock:
+ self.vals.clear()
+ self.refs.clear()
+
+ def __contains__(self, item):
+ with self.lock:
+ return item in self.vals
+
def __len__(self):
- return len(self.vals)
+ with self.lock:
+ return len(self.vals)
| dynamically disabling caching on a cortex seems broken
Trigger with setConfOpt("caching",0)
```
Traceback (most recent call last):
File "/home/autouser/git/synapse/synapse/eventbus.py", line 154, in dist
ret.append( func( event ) )
File "/home/autouser/git/synapse/synapse/lib/config.py", line 135, in callback
return func(valu)
File "/home/autouser/git/synapse/synapse/cores/common.py", line 643, in _onSetCaching
self.cache_byiden.clear()
AttributeError: 'RefDict' object has no attribute 'clear'
``` | vertexproject/synapse | diff --git a/synapse/tests/test_cache.py b/synapse/tests/test_cache.py
index 524d70d61..0c6eb36bf 100644
--- a/synapse/tests/test_cache.py
+++ b/synapse/tests/test_cache.py
@@ -1,9 +1,6 @@
-import unittest
-import threading
import collections
import synapse.lib.cache as s_cache
-import synapse.cortex as s_cortex
from synapse.tests.common import *
@@ -220,3 +217,54 @@ class CacheTest(SynTest):
od = s_cache.OnDem()
with self.raises(KeyError) as cm:
od.get('foo')
+
+ def test_refdict(self):
+ rd = s_cache.RefDict()
+ # puts, pop, get, clear
+ # Put an item in a few times
+ self.eq(rd.put('syn:val', 123), 123)
+ self.true('syn:val' in rd.vals)
+ self.eq(rd.refs.get('syn:val'), 1)
+ rd.put('syn:val', 123)
+ self.true('syn:val' in rd.vals)
+ self.eq(rd.refs.get('syn:val'), 2)
+ # Get values out, ensure defaults of None are returned
+ self.eq(rd.get('syn:val'), 123)
+ self.none(rd.get('syn:noval'))
+ # Pop an item out until its gone from the refdict
+ # XXX Unclear if the following none() is a desired behavior
+ # XXX for pop on a existing value which still has refs > 0
+ self.none(rd.pop('syn:val'))
+ self.eq(rd.refs.get('syn:val'), 1)
+ self.eq(rd.pop('syn:val'), 123)
+ self.false('syn:val' in rd.refs)
+ self.false('syn:val' in rd.vals)
+
+ kvs = (
+ ('foo', 'bar'),
+ ('knight', 'ni'),
+ ('clown', 'pennywise'),
+ ('inet:netuser', 'vertex.link/pennywise')
+ )
+ # puts
+ rd.puts(kvs)
+ # validate setdefault behavior
+ self.eq(rd.put('foo', 'baz'), 'bar')
+ self.eq(rd.refs.get('foo'), 2)
+ # __len__
+ self.eq(len(rd), 4)
+ # __contains__
+ self.true('clown' in rd)
+ self.false('badger' in rd)
+ # gets
+ r = rd.pops(['foo', 'knight', 'syn:novalu'])
+ # self.eq(r, ['bar', 'ni', None])
+ # XXX This is None, ni, None because of the weird pop() behavior.
+ self.eq(r, [None, 'ni', None])
+ self.true('foo' in rd)
+ self.true('knight' not in rd)
+ self.eq(len(rd), 3)
+ # Clear
+ rd.clear()
+ self.eq(len(rd), 0)
+ self.eq(len(rd.refs), 0)
diff --git a/synapse/tests/test_cortex.py b/synapse/tests/test_cortex.py
index 24707a38d..00837e4e8 100644
--- a/synapse/tests/test_cortex.py
+++ b/synapse/tests/test_cortex.py
@@ -1279,6 +1279,26 @@ class CortexTest(SynTest):
self.true(tufo0[1].get('.new'))
self.false(tufo1[1].get('.new'))
+ def test_cortex_caching_disable(self):
+
+ with s_cortex.openurl('ram://') as core:
+
+ core.setConfOpt('caching', 1)
+
+ tufo = core.formTufoByProp('foo', 'bar')
+
+ self.nn(core.cache_byiden.get(tufo[0]))
+ self.nn(core.cache_bykey.get(('foo', 'bar', 1)))
+ self.nn(core.cache_byprop.get(('foo', 'bar')))
+ self.eq(len(core.cache_fifo), 1)
+
+ core.setConfOpt('caching', 0)
+
+ self.none(core.cache_byiden.get(tufo[0]))
+ self.none(core.cache_bykey.get(('foo', 'bar', 1)))
+ self.none(core.cache_byprop.get(('foo', 'bar')))
+ self.eq(len(core.cache_fifo), 0)
+
def test_cortex_reqstor(self):
with s_cortex.openurl('ram://') as core:
self.raises(BadPropValu, core.formTufoByProp, 'foo:bar', True)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@82966be7ae8b614ae15eb42b3bf7b8306e5604bf#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_cache.py::CacheTest::test_refdict",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_disable"
]
| []
| [
"synapse/tests/test_cache.py::CacheTest::test_cache_clearing",
"synapse/tests/test_cache.py::CacheTest::test_cache_defval",
"synapse/tests/test_cache.py::CacheTest::test_cache_fini",
"synapse/tests/test_cache.py::CacheTest::test_cache_fixed",
"synapse/tests/test_cache.py::CacheTest::test_cache_magic",
"synapse/tests/test_cache.py::CacheTest::test_cache_miss",
"synapse/tests/test_cache.py::CacheTest::test_cache_set_maxtime",
"synapse/tests/test_cache.py::CacheTest::test_cache_timeout",
"synapse/tests/test_cache.py::CacheTest::test_cache_tufo",
"synapse/tests/test_cache.py::CacheTest::test_cache_tufo_prop",
"synapse/tests/test_cache.py::CacheTest::test_keycache_lookup",
"synapse/tests/test_cache.py::CacheTest::test_ondem_add",
"synapse/tests/test_cache.py::CacheTest::test_ondem_class",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_addmodel",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_by_type",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_bytype",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_add_tufo",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_atlimit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_del_tufo",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_new",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_oneref",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_caching_under_limit",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_choptag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_comp",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_dict",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_enforce",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_events",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_fire_set",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_getbytag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ingest",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_isnew",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_lmdb",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_local",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_minmax_epoch",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlrevs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_modlvers",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_module_datamodel_migration_persistent",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_norm_fail",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_notguidform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ram",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramhost",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_ramtyperange",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_reqstor",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_rev0",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_savefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seed",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_seq",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicefd",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splicepump",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_splices_errs",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_sqlite3",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_stats",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tag_ival",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tagform",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tags",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tlib_persistence",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_by_default",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_del",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_list",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_pop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprop",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_setprops",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_tufo_tag",
"synapse/tests/test_cortex.py::CortexTest::test_cortex_xact_deadlock"
]
| []
| Apache License 2.0 | 1,429 | [
"synapse/lib/cache.py"
]
| [
"synapse/lib/cache.py"
]
|
|
vertexproject__synapse-296 | 2cf04d94d938409b246e89d1aecefe8ed0feb577 | 2017-07-03 19:04:18 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/cores/common.py b/synapse/cores/common.py
index 69634180c..47202a198 100644
--- a/synapse/cores/common.py
+++ b/synapse/cores/common.py
@@ -2208,7 +2208,7 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
valu = inprops.get(name)
prop = form + ':' + name
- if not self._okSetProp(prop):
+ if not self.isSetPropOk(prop):
inprops.pop(name, None)
continue
@@ -2328,8 +2328,27 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
return node
- def _okSetProp(self, prop):
- # check for enforcement and validity of a full prop name
+ def isSetPropOk(self, prop):
+ '''
+ Check for enforcement and validity of a full prop name.
+
+ This can be used to determine if a property name may be set on a node,
+ given the data models currently loaded in a Cortex.
+
+ Args:
+ prop (str): Full property name to check.
+
+ Examples:
+ Check if a value is valid before calling a function.::
+
+ prop = 'foo:bar:baz'
+ if core.isSetPropOk(prop):
+ doSomething(...)
+
+ Returns:
+ bool: True if the property can be set on the node; False if it cannot be set.
+ '''
+ #
if not self.enforce:
return True
@@ -2339,10 +2358,17 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
'''
Set ( with de-duplication ) the given tufo props.
- Example:
+ Args:
+ tufo ((str, dict)): The tufo to set properties on.
+ **props: Properties to set on the tufo.
- tufo = core.setTufoProps(tufo, woot='hehe', blah=10)
+ Examples:
+ ::
+
+ tufo = core.setTufoProps(tufo, woot='hehe', blah=10)
+ Returns:
+ ((str, dict)): The source tufo, with any updated properties.
'''
reqiden(tufo)
# add tufo form prefix to props
@@ -2398,7 +2424,7 @@ class Cortex(EventBus, DataModel, Runtime, Configable, s_ingest.IngestApi):
form = tufo[1].get('tufo:form')
prop = form + ':' + prop
- if not self._okSetProp(prop):
+ if not self.isSetPropOk(prop):
return tufo
return self._incTufoProp(tufo, prop, incval=incval)
diff --git a/synapse/lib/storm.py b/synapse/lib/storm.py
index 1254253f7..376aee35f 100644
--- a/synapse/lib/storm.py
+++ b/synapse/lib/storm.py
@@ -4,6 +4,7 @@ import re
import time
import fnmatch
import logging
+import collections
import synapse.common as s_common
import synapse.compat as s_compat
@@ -1121,12 +1122,58 @@ class Runtime(Configable):
# TODO: use edits here for requested delete
def _stormOperSetProp(self, query, oper):
+ # Coverage of this function is affected by the following issue:
+ # https://bitbucket.org/ned/coveragepy/issues/198/continue-marked-as-not-covered
args = oper[1].get('args')
props = dict(oper[1].get('kwlist'))
core = self.getStormCore()
- [core.setTufoProps(node, **props) for node in query.data()]
+ formnodes = collections.defaultdict(list)
+ formprops = collections.defaultdict(dict)
+
+ for node in query.data():
+ formnodes[node[1].get('tufo:form')].append(node)
+
+ forms = tuple(formnodes.keys())
+
+ for prop, valu in props.items():
+
+ if prop.startswith(':'):
+ valid = False
+ _prop = prop[1:]
+ # Check against every lifted form, since we may have a relative prop
+ # Which is valid against
+ for form in forms:
+ _fprop = form + prop
+ if core.isSetPropOk(_fprop):
+ formprops[form][_prop] = valu
+ valid = True
+ if not valid:
+ mesg = 'Relative prop is not valid on any lifted forms.'
+ raise s_common.BadSyntaxError(name=prop, mesg=mesg)
+ continue # pragma: no cover
+
+ if prop.startswith(forms):
+ valid = False
+ for form in forms:
+ if prop.startswith(form + ':') and core.isSetPropOk(prop):
+ _prop = prop[len(form) + 1:]
+ formprops[form][_prop] = valu
+ valid = True
+ break
+ if not valid:
+ mesg = 'Full prop is not valid on any lifted forms.'
+ raise s_common.BadSyntaxError(name=prop, mesg=mesg)
+ continue # pragma: no cover
+
+ mesg = 'setprop operator requires props to start with relative or full prop names.'
+ raise s_common.BadSyntaxError(name=prop, mesg=mesg)
+
+ for form, nodes in formnodes.items():
+ props = formprops.get(form)
+ if props:
+ [core.setTufoProps(node, **props) for node in nodes]
def _iterPropTags(self, props, tags):
for prop in props:
diff --git a/synapse/lib/syntax.py b/synapse/lib/syntax.py
index b0f8c2f49..e12e62122 100644
--- a/synapse/lib/syntax.py
+++ b/synapse/lib/syntax.py
@@ -611,7 +611,7 @@ def parse(text, off=0):
valu, off = parse_macro_valu(text, off + 1)
if prop[0] == ':':
- kwargs = {prop[1:]: valu}
+ kwargs = {prop: valu}
ret.append(oper('setprop', **kwargs))
continue
| setprop() should take both full props and rel props
```
setprop(foo:bar:baz=10)
- or -
setprop(:baz=10)
```
rather than
```
setprop(baz=10)
``` | vertexproject/synapse | diff --git a/synapse/tests/test_lib_storm.py b/synapse/tests/test_lib_storm.py
index e4236c6b4..1cede3be2 100644
--- a/synapse/tests/test_lib_storm.py
+++ b/synapse/tests/test_lib_storm.py
@@ -44,13 +44,62 @@ class StormTest(SynTest):
with s_cortex.openurl('ram:///') as core:
core.setConfOpt('enforce', 1)
+ # relative key/val syntax, explicitly relative vals
+ node = core.formTufoByProp('inet:netuser', 'vertex.link/pennywise')
+ node = core.formTufoByProp('inet:netuser', 'vertex.link/visi')
+ node = core.eval('inet:netuser=vertex.link/pennywise setprop(:realname="Robert Gray")')[0]
+
+ self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
+ self.eq(node[1].get('inet:netuser:realname'), 'robert gray')
+
+ # Full prop val syntax
+ node = core.eval('inet:netuser=vertex.link/pennywise setprop(inet:netuser:signup="1970-01-01")')[0]
+ self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
+ self.eq(node[1].get('inet:netuser:signup'), 0)
+
+ # Combined syntax using both relative props and full props together
+ cmd = 'inet:netuser=vertex.link/pennywise setprop(:seen:min="2000", :seen:max="2017", ' \
+ 'inet:netuser:[email protected], inet:netuser:signup:ipv4="127.0.0.1")'
+ node = core.eval(cmd)[0]
+ self.nn(node[1].get('inet:netuser:seen:min'))
+ self.nn(node[1].get('inet:netuser:seen:max'))
+ self.nn(node[1].get('inet:netuser:signup:ipv4'))
+ self.eq(node[1].get('inet:netuser:email'), '[email protected]')
+
+ # old / bad syntax fails
+ # kwlist key/val syntax is no longer valid in setprop()
node = core.formTufoByProp('inet:fqdn', 'vertex.link')
-
- node = core.eval('inet:fqdn=vertex.link setprop(created="2016-05-05",updated="2017/05/05")')[0]
-
- self.eq(node[1].get('inet:fqdn'), 'vertex.link')
- self.eq(node[1].get('inet:fqdn:created'), 1462406400000)
- self.eq(node[1].get('inet:fqdn:updated'), 1493942400000)
+ bad_cmd = 'inet:fqdn=vertex.link setprop(created="2016-05-05",updated="2017/05/05")'
+ self.raises(BadSyntaxError, core.eval, bad_cmd)
+ # a full prop which isn't valid for the node is bad
+ bad_cmd = 'inet:fqdn=vertex.link setprop(inet:fqdn:typocreated="2016-05-05")'
+ self.raises(BadSyntaxError, core.eval, bad_cmd)
+ # a rel prop which isn't valid for the node is bad
+ bad_cmd = 'inet:fqdn=vertex.link setprop(:typocreated="2016-05-05")'
+ self.raises(BadSyntaxError, core.eval, bad_cmd)
+
+ # test possible form confusion
+ modl = {
+ 'types': (
+ ('foo:bar', {'subof': 'str'}),
+ ('foo:barbaz', {'subof': 'str'})
+ ),
+ 'forms': (
+ ('foo:bar', {'ptype': 'str'}, [
+ ('blah', {'ptype': 'str'})
+ ]),
+ ('foo:barbaz', {'ptype': 'str'}, [
+ ('blah', {'ptype': 'str'})
+ ]),
+ )
+ }
+ core.addDataModel('form_confusion', modl)
+ node = core.formTufoByProp('foo:bar', 'hehe')
+ core.addTufoTag(node, 'confusion')
+ node = core.formTufoByProp('foo:barbaz', 'haha')
+ core.addTufoTag(node, 'confusion')
+ node = core.eval('''#confusion setprop(foo:barbaz:blah=duck) +foo:barbaz''')[0]
+ self.eq(node[1].get('foo:barbaz:blah'), 'duck')
def test_storm_filt_regex(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "tornado>=3.2.2 cryptography>=1.7.2 pyOpenSSL>=16.2.0 msgpack-python>=0.4.2 xxhash>=1.0.1 lmdb>=0.92",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
importlib-metadata==4.8.3
iniconfig==1.1.1
lmdb==1.6.2
msgpack @ file:///tmp/build/80754af9/msgpack-python_1612287171716/work
msgpack-python==0.5.6
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing==3.1.4
pytest==7.0.1
-e git+https://github.com/vertexproject/synapse.git@2cf04d94d938409b246e89d1aecefe8ed0feb577#egg=synapse
tomli==1.2.3
tornado @ file:///tmp/build/80754af9/tornado_1606942266872/work
typing_extensions==4.1.1
xxhash==3.2.0
zipp==3.6.0
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- cryptography=35.0.0=py36hd23ed53_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tornado=6.1=py36h27cfd23_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lmdb==1.6.2
- msgpack-python==0.5.6
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- xxhash==3.2.0
- zipp==3.6.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_setprop"
]
| []
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_alltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_cmpr_norm",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode_caching",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_deltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_edit_end",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_editmode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_filt_regex",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lift",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lifts_by",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_pivot",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_refs",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_show_help",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_fromtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_glob",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_ival",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_jointag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_query",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_totag",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior_negatives",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_default"
]
| []
| Apache License 2.0 | 1,430 | [
"synapse/lib/syntax.py",
"synapse/cores/common.py",
"synapse/lib/storm.py"
]
| [
"synapse/lib/syntax.py",
"synapse/cores/common.py",
"synapse/lib/storm.py"
]
|
|
openmrslab__suspect-65 | a70490c09196d7996ecfd99401eb68765ae25a67 | 2017-07-03 21:19:43 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | coveralls:
[Coverage report](https://coveralls.io/builds/12238080)
Coverage increased (+0.2%) to 78.728% when pulling **2e5c6eaa68957507ad19205cfbc872dbb3011ace on 62_voxel_mask** into **a70490c09196d7996ecfd99401eb68765ae25a67 on master**.
| diff --git a/suspect/image/__init__.py b/suspect/image/__init__.py
index 14e78f7..cffdc25 100644
--- a/suspect/image/__init__.py
+++ b/suspect/image/__init__.py
@@ -1,1 +1,2 @@
-from suspect.image._image import *
\ No newline at end of file
+from suspect.image._image import *
+from ._mask import create_mask
\ No newline at end of file
diff --git a/suspect/image/_mask.py b/suspect/image/_mask.py
new file mode 100644
index 0000000..e31cff1
--- /dev/null
+++ b/suspect/image/_mask.py
@@ -0,0 +1,46 @@
+import numpy as np
+
+
+def create_mask(source_image, ref_image, voxels=None):
+ """
+ Creates a volumetric mask for the source_image voxel in the coordinate
+ system of the ref_image volume.
+
+ Parameters
+ ----------
+ source_image : MRSBase
+ The spectroscopy volume from which to create the mask.
+ ref_image : ImageBase
+ The reference image volume which defines the coordinate system for
+ the mask.
+
+ Returns
+ -------
+ numpy.ndarray
+ Boolean array with the same shape as ref_image, True for all voxels
+ inside source_image, false for all others.
+ """
+
+ # create a grid of coordinates for all points in the ref_image
+ # the ref_image has coord index order [z, y, x] so we reverse the shape
+ # to get the indices in (x, y, z) format for the coordinate conversion
+ ref_coords = np.mgrid[[range(0, size) for size in ref_image.shape[::-1]]]
+
+ # mgrid puts the (x, y, z) tuple at the front, we want it at the back
+ ref_coords = np.moveaxis(ref_coords, 0, -1)
+
+ # now we can apply to_scanner and from_scanner to convert from ref coords
+ # into source coords
+ scanner_coords = ref_image.to_scanner(ref_coords)
+ source_coords = source_image.from_scanner(scanner_coords)
+
+ # now check whether the source_coords are in the selected voxel
+ # TODO for now, we assume single voxel data until issue 50 is resolved
+
+ # have to transpose the result to get it to match the shape of ref_image
+ return np.all((source_coords[..., 0] < 0.5,
+ source_coords[..., 0] >= -0.5,
+ source_coords[..., 1] >= -0.5,
+ source_coords[..., 2] >= -0.5,
+ source_coords[..., 1] < 0.5,
+ source_coords[..., 2] < 0.5), axis=0).T
| ENH: Mask function for voxel on image
It should be possible to call a `create_mask()` function which produces a binary mask showing which voxels of a reference image are inside a spectroscopy voxel. In the default case this would be for all the voxels in the spectroscopy, but it should also be possible to specify a specific voxel or list of voxels to be used instead, so that masks for individual CSI voxels are also possible. | openmrslab/suspect | diff --git a/tests/test_mrs/test_image.py b/tests/test_mrs/test_image.py
new file mode 100644
index 0000000..70fca64
--- /dev/null
+++ b/tests/test_mrs/test_image.py
@@ -0,0 +1,23 @@
+import suspect
+
+import suspect._transforms
+
+import numpy as np
+
+
+def test_simple_mask():
+ source_transform = suspect._transforms.transformation_matrix([1, 0, 0],
+ [0, 1, 0],
+ [5, 0, 0],
+ [10, 10, 10])
+ ref_transform = suspect._transforms.transformation_matrix([1, 0, 0],
+ [0, 1, 0],
+ [-10, -5, -5],
+ [1, 1, 1])
+ source_volume = suspect.MRSBase(np.ones(1024), 1e-3, 123, transform=source_transform)
+ ref_volume = suspect.base.ImageBase(np.zeros((20, 20, 20)), transform=ref_transform)
+ mask = suspect.image.create_mask(source_volume, ref_volume)
+ assert ref_volume.shape == mask.shape
+ mask_target = np.zeros_like(ref_volume)
+ mask_target[0:10, 0:10, 10:20] = 1
+ np.testing.assert_equal(mask_target.astype('bool'), mask)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asteval==0.9.26
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
lmfit==1.0.3
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parse==1.20.2
Parsley==1.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pydicom==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.1.1
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/openmrslab/suspect.git@a70490c09196d7996ecfd99401eb68765ae25a67#egg=suspect
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
uncertainties==3.1.7
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asteval==0.9.26
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- lmfit==1.0.3
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parse==1.20.2
- parsley==1.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pydicom==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.1.1
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- uncertainties==3.1.7
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_image.py::test_simple_mask"
]
| []
| []
| []
| MIT License | 1,431 | [
"suspect/image/_mask.py",
"suspect/image/__init__.py"
]
| [
"suspect/image/_mask.py",
"suspect/image/__init__.py"
]
|
jaywink__federation-90 | 5580b5143f3fb536d791ec74c32a92fb03d5c7be | 2017-07-04 22:00:24 | 5580b5143f3fb536d791ec74c32a92fb03d5c7be | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 63c2f0f..3ba3370 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
## [unreleased]
+### Added
+* New style Diaspora private encrypted JSON payloads are now supported in the receiving side. Outbound private Diaspora payloads are still sent as legacy encrypted payloads. ([issue](https://github.com/jaywink/federation/issues/83))
+ * No additional changes need to be made when calling `handle_receive` from your task processing. Just pass in the full received XML or JSON payload as a string with recipient user object as before.
+
### Fixed
* Fix getting sender from a combination of legacy Diaspora encrypted payload and new entity names (for example `author`). This combination probably only existed in this library.
* Correctly extend entity `_children`. Certain Diaspora payloads caused `_children` for an entity to be written over by an empty list, causing for example status message photos to not be saved. Correctly do an extend on it. ([issue](https://github.com/jaywink/federation/issues/89))
@@ -9,6 +13,7 @@
### Removed
* `Post.photos` entity attribute was never used by any code and has been removed. Child entities of type `Image` are stored in the `Post._children` as before.
+* Removed deprecated user private key lookup using `user.key` in Diaspora receive processing. Passed in `user` objects must now have a `private_key` attribute.
## [0.12.0] - 2017-05-22
diff --git a/federation/protocols/base.py b/federation/protocols/base.py
index 0f48de3..e298ce0 100644
--- a/federation/protocols/base.py
+++ b/federation/protocols/base.py
@@ -37,7 +37,7 @@ class BaseProtocol(object):
"""
raise NotImplementedError("Implement in subclass")
- def receive(self, payload, user=None, sender_key_fetcher=None, *args, **kwargs):
+ def receive(self, payload, user=None, sender_key_fetcher=None):
"""Receive a payload.
Args:
diff --git a/federation/protocols/diaspora/encrypted.py b/federation/protocols/diaspora/encrypted.py
new file mode 100644
index 0000000..ccf23a4
--- /dev/null
+++ b/federation/protocols/diaspora/encrypted.py
@@ -0,0 +1,30 @@
+import json
+from base64 import b64decode
+
+from Crypto.Cipher import PKCS1_v1_5, AES
+from lxml import etree
+
+
+def pkcs7_unpad(data):
+ """Remove the padding bytes that were added at point of encryption."""
+ if isinstance(data, str):
+ return data[0:-ord(data[-1])]
+ else:
+ return data[0:-data[-1]]
+
+
+class EncryptedPayload:
+ """Diaspora encrypted JSON payloads."""
+
+ @staticmethod
+ def decrypt(payload, private_key):
+ """Decrypt an encrypted JSON payload and return the Magic Envelope document inside."""
+ cipher = PKCS1_v1_5.new(private_key)
+ aes_key_str = cipher.decrypt(b64decode(payload.get("aes_key")), sentinel=None)
+ aes_key = json.loads(aes_key_str.decode("utf-8"))
+ key = b64decode(aes_key.get("key"))
+ iv = b64decode(aes_key.get("iv"))
+ encrypted_magic_envelope = b64decode(payload.get("encrypted_magic_envelope"))
+ encrypter = AES.new(key, AES.MODE_CBC, iv)
+ content = encrypter.decrypt(encrypted_magic_envelope)
+ return etree.fromstring(pkcs7_unpad(content))
diff --git a/federation/protocols/diaspora/magic_envelope.py b/federation/protocols/diaspora/magic_envelope.py
index ff9cc13..46a9a08 100644
--- a/federation/protocols/diaspora/magic_envelope.py
+++ b/federation/protocols/diaspora/magic_envelope.py
@@ -8,7 +8,7 @@ from lxml import etree
NAMESPACE = "http://salmon-protocol.org/ns/magic-env"
-class MagicEnvelope():
+class MagicEnvelope:
"""Diaspora protocol magic envelope.
See: http://diaspora.github.io/diaspora_federation/federation/magicsig.html
diff --git a/federation/protocols/diaspora/protocol.py b/federation/protocols/diaspora/protocol.py
index e65d3ef..73c417e 100644
--- a/federation/protocols/diaspora/protocol.py
+++ b/federation/protocols/diaspora/protocol.py
@@ -1,6 +1,5 @@
import json
import logging
-import warnings
from base64 import b64decode, urlsafe_b64decode, b64encode, urlsafe_b64encode
from urllib.parse import unquote_plus
@@ -13,7 +12,9 @@ from lxml import etree
from federation.exceptions import EncryptedMessageError, NoSenderKeyFoundError, SignatureVerificationError
from federation.protocols.base import BaseProtocol
+from federation.protocols.diaspora.encrypted import EncryptedPayload
from federation.protocols.diaspora.magic_envelope import MagicEnvelope
+from federation.utils.text import decode_if_bytes
logger = logging.getLogger("federation")
@@ -29,7 +30,7 @@ def identify_payload(payload):
"""
# Private encrypted JSON payload
try:
- data = json.loads(payload)
+ data = json.loads(decode_if_bytes(payload))
if "encrypted_magic_envelope" in data:
return True
except Exception:
@@ -55,17 +56,36 @@ class Protocol(BaseProtocol):
Mostly taken from Pyaspora (https://github.com/lukeross/pyaspora).
"""
- def receive(self, payload, user=None, sender_key_fetcher=None, skip_author_verification=False, *args, **kwargs):
+ def __init__(self):
+ super().__init__()
+ self.encrypted = self.legacy = False
+
+ def get_json_payload_magic_envelope(self, payload):
+ """Encrypted JSON payload"""
+ private_key = self._get_user_key(self.user)
+ return EncryptedPayload.decrypt(payload=payload, private_key=private_key)
+
+ def store_magic_envelope_doc(self, payload):
+ """Get the Magic Envelope, trying JSON first."""
+ try:
+ json_payload = json.loads(decode_if_bytes(payload))
+ except ValueError:
+ # XML payload
+ xml = unquote_plus(payload)
+ xml = xml.lstrip().encode("utf-8")
+ logger.debug("diaspora.protocol.store_magic_envelope_doc: xml payload: %s", xml)
+ self.doc = etree.fromstring(xml)
+ else:
+ logger.debug("diaspora.protocol.store_magic_envelope_doc: json payload: %s", json_payload)
+ self.doc = self.get_json_payload_magic_envelope(json_payload)
+
+ def receive(self, payload, user=None, sender_key_fetcher=None, skip_author_verification=False):
"""Receive a payload.
For testing purposes, `skip_author_verification` can be passed. Authorship will not be verified."""
self.user = user
self.get_contact_key = sender_key_fetcher
- # Prepare payload
- xml = unquote_plus(payload)
- xml = xml.lstrip().encode("utf-8")
- logger.debug("diaspora.protocol.receive: xml content: %s", xml)
- self.doc = etree.fromstring(xml)
+ self.store_magic_envelope_doc(payload)
# Check for a legacy header
self.find_header()
# Open payload and get actual message
@@ -78,17 +98,11 @@ class Protocol(BaseProtocol):
return self.sender_handle, self.content
def _get_user_key(self, user):
- if not hasattr(self.user, "private_key") or not self.user.private_key:
- if hasattr(self.user, "key") and self.user.key:
- warnings.warn("Using `key` in user object for private key has been deprecated. Please "
- "have available `private_key` instead. Usage of `key` will be removed after 0.8.0.",
- DeprecationWarning)
- return self.user.key
+ if not getattr(self.user, "private_key", None):
raise EncryptedMessageError("Cannot decrypt private message without user key")
return self.user.private_key
def find_header(self):
- self.encrypted = self.legacy = False
self.header = self.doc.find(".//{"+PROTOCOL_NS+"}header")
if self.header != None:
# Legacy public header found
diff --git a/federation/utils/text.py b/federation/utils/text.py
new file mode 100644
index 0000000..310a5e1
--- /dev/null
+++ b/federation/utils/text.py
@@ -0,0 +1,5 @@
+def decode_if_bytes(text):
+ try:
+ return text.decode("utf-8")
+ except AttributeError:
+ return text
| Add support for receiving new style Diaspora private message payloads
See https://github.com/diaspora/diaspora_federation/issues/30 | jaywink/federation | diff --git a/federation/tests/protocols/diaspora/test_encrypted.py b/federation/tests/protocols/diaspora/test_encrypted.py
new file mode 100644
index 0000000..5d10988
--- /dev/null
+++ b/federation/tests/protocols/diaspora/test_encrypted.py
@@ -0,0 +1,33 @@
+from unittest.mock import patch, Mock
+
+from Crypto.Cipher import AES
+
+from federation.protocols.diaspora.encrypted import pkcs7_unpad, EncryptedPayload
+
+
+def test_pkcs7_unpad():
+ assert pkcs7_unpad(b"foobar\x02\x02") == b"foobar"
+ assert pkcs7_unpad("foobar\x02\x02") == "foobar"
+
+
+class TestEncryptedPayload:
+ @patch("federation.protocols.diaspora.encrypted.PKCS1_v1_5.new")
+ @patch("federation.protocols.diaspora.encrypted.AES.new")
+ @patch("federation.protocols.diaspora.encrypted.pkcs7_unpad", side_effect=lambda x: x)
+ @patch("federation.protocols.diaspora.encrypted.b64decode", side_effect=lambda x: x)
+ def test_decrypt(self, mock_decode, mock_unpad, mock_aes, mock_pkcs1):
+ mock_decrypt = Mock(return_value=b'{"iv": "foo", "key": "bar"}')
+ mock_pkcs1.return_value = Mock(decrypt=mock_decrypt)
+ mock_encrypter = Mock(return_value="<foo>bar</foo>")
+ mock_aes.return_value = Mock(decrypt=mock_encrypter)
+ doc = EncryptedPayload.decrypt(
+ {"aes_key": '{"iv": "foo", "key": "bar"}', "encrypted_magic_envelope": "magically encrypted"},
+ "private_key",
+ )
+ mock_pkcs1.assert_called_once_with("private_key")
+ mock_decrypt.assert_called_once_with('{"iv": "foo", "key": "bar"}', sentinel=None)
+ assert mock_decode.call_count == 4
+ mock_aes.assert_called_once_with("bar", AES.MODE_CBC, "foo")
+ mock_encrypter.assert_called_once_with("magically encrypted")
+ assert doc.tag == "foo"
+ assert doc.text == "bar"
diff --git a/federation/tests/protocols/diaspora/test_protocol.py b/federation/tests/protocols/diaspora/test_protocol.py
index 3fd5a37..f33a31a 100644
--- a/federation/tests/protocols/diaspora/test_protocol.py
+++ b/federation/tests/protocols/diaspora/test_protocol.py
@@ -155,3 +155,24 @@ class TestDiasporaProtocol(DiasporaTestBase):
data = protocol.build_send(entity, from_user)
mock_init_message.assert_called_once_with(mock_entity_xml, from_user.handle, from_user.private_key)
assert data == {"xml": "xmldata"}
+
+ @patch("federation.protocols.diaspora.protocol.EncryptedPayload.decrypt")
+ def test_get_json_payload_magic_envelope(self, mock_decrypt):
+ protocol = Protocol()
+ protocol.user = MockUser()
+ protocol.get_json_payload_magic_envelope("payload")
+ mock_decrypt.assert_called_once_with(payload="payload", private_key="foobar")
+
+ @patch.object(Protocol, "get_json_payload_magic_envelope", return_value=etree.fromstring("<foo>bar</foo>"))
+ def test_store_magic_envelope_doc_json_payload(self, mock_store):
+ protocol = Protocol()
+ protocol.store_magic_envelope_doc('{"foo": "bar"}')
+ mock_store.assert_called_once_with({"foo": "bar"})
+ assert protocol.doc.tag == "foo"
+ assert protocol.doc.text == "bar"
+
+ def test_store_magic_envelope_doc_xml_payload(self):
+ protocol = Protocol()
+ protocol.store_magic_envelope_doc("<foo>bar</foo>")
+ assert protocol.doc.tag == "foo"
+ assert protocol.doc.text == "bar"
diff --git a/federation/tests/utils/test_text.py b/federation/tests/utils/test_text.py
new file mode 100644
index 0000000..7f629d3
--- /dev/null
+++ b/federation/tests/utils/test_text.py
@@ -0,0 +1,6 @@
+from federation.utils.text import decode_if_bytes
+
+
+def test_decode_if_bytes():
+ assert decode_if_bytes(b"foobar") == "foobar"
+ assert decode_if_bytes("foobar") == "foobar"
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 4
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-warnings"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libxml2-dev libxslt1-dev"
],
"python": "3.6",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
arrow==1.2.3
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.4.5
commonmark==0.9.1
coverage==6.2
cssselect==1.1.0
dirty-validators==0.5.4
docutils==0.18.1
factory-boy==3.2.1
Faker==14.2.1
-e git+https://github.com/jaywink/federation.git@5580b5143f3fb536d791ec74c32a92fb03d5c7be#egg=federation
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
jsonschema==3.2.0
livereload==2.6.3
lxml==5.3.1
MarkupSafe==2.0.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycrypto==2.6.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-warnings==0.3.1
python-dateutil==2.9.0.post0
python-xrd==0.1
pytz==2025.2
recommonmark==0.7.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-autobuild==2021.3.14
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
tornado==6.1
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: federation
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- arrow==1.2.3
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.4.5
- commonmark==0.9.1
- coverage==6.2
- cssselect==1.1.0
- dirty-validators==0.5.4
- docutils==0.18.1
- factory-boy==3.2.1
- faker==14.2.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- jsonschema==3.2.0
- livereload==2.6.3
- lxml==5.3.1
- markupsafe==2.0.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycrypto==2.6.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-warnings==0.3.1
- python-dateutil==2.9.0.post0
- python-xrd==0.1
- pytz==2025.2
- recommonmark==0.7.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-autobuild==2021.3.14
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tornado==6.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/federation
| [
"federation/tests/protocols/diaspora/test_encrypted.py::test_pkcs7_unpad",
"federation/tests/protocols/diaspora/test_encrypted.py::TestEncryptedPayload::test_decrypt",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_find_unencrypted_header",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_find_encrypted_header",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_unencrypted_returns_sender_and_content",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_encrypted_returns_sender_and_content",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_on_encrypted_message_and_no_user",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_on_encrypted_message_and_no_user_key",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_receive_raises_if_sender_key_cannot_be_found",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_message_content",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_legacy_diaspora_payload",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_diaspora_public_payload",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_diaspora_encrypted_payload",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_identify_payload_with_other_payload",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_sender_in_header",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_sender_in_content",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_sender_legacy_returns_none_if_no_sender_found",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_build_send",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_get_json_payload_magic_envelope",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_store_magic_envelope_doc_json_payload",
"federation/tests/protocols/diaspora/test_protocol.py::TestDiasporaProtocol::test_store_magic_envelope_doc_xml_payload",
"federation/tests/utils/test_text.py::test_decode_if_bytes"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 1,432 | [
"federation/protocols/diaspora/protocol.py",
"federation/protocols/diaspora/encrypted.py",
"federation/utils/text.py",
"federation/protocols/diaspora/magic_envelope.py",
"CHANGELOG.md",
"federation/protocols/base.py"
]
| [
"federation/protocols/diaspora/protocol.py",
"federation/protocols/diaspora/encrypted.py",
"federation/utils/text.py",
"federation/protocols/diaspora/magic_envelope.py",
"CHANGELOG.md",
"federation/protocols/base.py"
]
|
|
inducer__pudb-260 | fef17b6f33da7d03758c150b37cd2f84754aa01d | 2017-07-05 00:13:12 | 3f627ce0f7370ab80bc2496cb3d2364686f10efe | diff --git a/pudb/ui_tools.py b/pudb/ui_tools.py
index 9997cad..a398865 100644
--- a/pudb/ui_tools.py
+++ b/pudb/ui_tools.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
import urwid
-from urwid.util import _target_encoding
+from urwid.util import _target_encoding, calc_width
# generic urwid helpers -------------------------------------------------------
@@ -14,7 +14,7 @@ def make_canvas(txt, attr, maxcol, fill_attr=None):
# filter out zero-length attrs
line_attr = [(aname, l) for aname, l in line_attr if l > 0]
- diff = maxcol - len(line)
+ diff = maxcol - calc_width(line, 0, len(line))
if diff > 0:
line += " "*diff
line_attr.append((fill_attr, diff))
| "Canvas text is wider than the maxcol specified" with Chinese
The full script is simple:
```
data = "中文"
```
Run it with
```
pudb3 a.py
```
After pressing "n" I got this:
```
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/pudb/__init__.py", line 83, in runscript
dbg._runscript(mainpyfile)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 419, in _runscript
self.run(statement, globals=globals_, locals=locals_)
File "/usr/lib/python3.6/bdb.py", line 431, in run
exec(cmd, globals, locals)
File "<string>", line 1, in <module>
File "a.py", line 1, in <module>
data = "中文"
File "/usr/lib/python3.6/bdb.py", line 52, in trace_dispatch
return self.dispatch_return(frame, arg)
File "/usr/lib/python3.6/bdb.py", line 93, in dispatch_return
self.user_return(frame, arg)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 385, in user_return
self.interaction(frame)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 339, in interaction
show_exc_dialog=show_exc_dialog)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2079, in call_with_ui
return f(*args, **kwargs)
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2307, in interaction
self.event_loop()
File "/usr/lib/python3.6/site-packages/pudb/debugger.py", line 2265, in event_loop
canvas = toplevel.render(self.size, focus=True)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1083, in render
focus and self.focus_part == 'body')
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 2085, in render
focus = focus and self.focus_position == i)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1526, in render
canv = w.render((maxcol, rows), focus=focus and item_focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/container.py", line 1526, in render
canv = w.render((maxcol, rows), focus=focus and item_focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/decoration.py", line 225, in render
canv = self._original_widget.render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 1750, in render
canv = get_delegate(self).render(size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/urwid/listbox.py", line 485, in render
canvas = widget.render((maxcol,))
File "/usr/lib/python3.6/site-packages/urwid/widget.py", line 141, in cached_render
canv = fn(self, size, focus=focus)
File "/usr/lib/python3.6/site-packages/pudb/var_view.py", line 163, in render
return make_canvas(text, attr, maxcol, apfx+"value")
File "/usr/lib/python3.6/site-packages/pudb/ui_tools.py", line 48, in make_canvas
maxcol=maxcol)
File "/usr/lib/python3.6/site-packages/urwid/canvas.py", line 356, in __init__
raise CanvasError("Canvas text is wider than the maxcol specified \n%r\n%r\n%r"%(maxcol,widths,text))
urwid.canvas.CanvasError: Canvas text is wider than the maxcol specified
53
[55]
[b"data: '\xe4\xb8\xad\xe6\x96\x87' "]
```
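The failure above comes down to double-width (CJK) characters: their terminal display width is larger than their code-point count, so any width math based on `len()` alone comes up short. A small standard-library sketch of the distinction (a hypothetical helper, not pudb/urwid code):

```python
import unicodedata

def display_width(s):
    # Fullwidth ("F") and Wide ("W") characters occupy two terminal columns
    return sum(2 if unicodedata.east_asian_width(ch) in ("W", "F") else 1
               for ch in s)

text = u"data: '中文'"
print(len(text))            # 10 code points
print(display_width(text))  # 12 terminal columns
```

This is the same 10-vs-12 gap exercised by the `test_wide_chars` test added below.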
This is Python 3.6.0 on Arch Linux, with the zh_CN.UTF-8 locale. pudb is "python-pudb 2017.1.1-1" (the pudb3 script accepts neither `--version` nor `-V` :-( ) | inducer/pudb | diff --git a/test/test_make_canvas.py b/test/test_make_canvas.py
index 093cd63..b1ed681 100644
--- a/test/test_make_canvas.py
+++ b/test/test_make_canvas.py
@@ -49,6 +49,19 @@ def test_byte_boundary():
)
assert list(canvas.content()) == [[('var value', None, b'aaaaaa\xc3\xa9')]]
+def test_wide_chars():
+ text = u"data: '中文'"
+ canvas = make_canvas(
+ txt=[text],
+ attr=[[('var label', 6), ('var value', 4)]],
+ maxcol=47,
+ )
+ assert list(canvas.content()) == [[
+ ('var label', None, b'data: '),
+ ('var value', None, u"'中文'".encode('utf-8')),
+ (None, None, b' '*(47 - 12)), # 10 chars, 2 of which are double width
+ ]]
+
if __name__ == "__main__":
import sys
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2017.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
-e git+https://github.com/inducer/pudb.git@fef17b6f33da7d03758c150b37cd2f84754aa01d#egg=pudb
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-mock==3.6.1
tomli==1.2.3
typing_extensions==4.1.1
urwid==2.1.2
zipp==3.6.0
| name: pudb
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urwid==2.1.2
- zipp==3.6.0
prefix: /opt/conda/envs/pudb
| [
"test/test_make_canvas.py::test_wide_chars"
]
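For reference, the FAIL_TO_PASS entry above can be re-run on its own with the same flags as the `test_cmd` in the install config; a minimal sketch (assuming pytest and a checkout of the repository are available in the environment):

```python
import pytest

# Same flags as test_cmd, restricted to the single failing-then-passing test
args = [
    "--no-header", "-rA", "--tb=line", "--color=no",
    "-p", "no:cacheprovider", "-W", "ignore::DeprecationWarning",
    "test/test_make_canvas.py::test_wide_chars",
]
raise SystemExit(pytest.main(args))
```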
| []
| [
"test/test_make_canvas.py::test_simple",
"test/test_make_canvas.py::test_multiple",
"test/test_make_canvas.py::test_boundary",
"test/test_make_canvas.py::test_byte_boundary"
]
| []
| MIT/X Consortium license | 1,433 | [
"pudb/ui_tools.py"
]
| [
"pudb/ui_tools.py"
]
|
|
oasis-open__cti-python-stix2-27 | f8e3a4f0e895da95fbef109041dec26d6f968690 | 2017-07-05 15:40:21 | 58f39f80af5cbfe02879c2efa4b3b4ef7a504390 | diff --git a/docs/overview.rst b/docs/overview.rst
index 643253c..b8c3809 100644
--- a/docs/overview.rst
+++ b/docs/overview.rst
@@ -1,9 +1,6 @@
Overview
========
-Goals
------
-
High level goals/principles of the python-stix2 library:
1. It should be as easy as possible (but no easier!) to perform common tasks of
@@ -13,9 +10,6 @@ High level goals/principles of the python-stix2 library:
the STIX 2.0 spec, as well as associated best practices. The library should
make it hard to do "the wrong thing".
-Design Decisions
-----------------
-
To accomplish these goals, and to incorporate lessons learned while developing
python-stix (for STIX 1.x), several decisions influenced the design of
python-stix2:
@@ -28,70 +22,3 @@ python-stix2:
3. Core Python data types (including numeric types, ``datetime``) should be used
when appropriate, and serialized to the correct format in JSON as specified
in the STIX 2.0 spec.
-
-Architecture
-------------
-
-The `stix2` library APIs are divided into three logical layers, representing
-different levels of abstraction useful in different types of scripts and larger
-applications. It is possible to combine multiple layers in the same program,
-and the higher levels build on the layers below.
-
-
-Object Layer
-^^^^^^^^^^^^
-
-The lowest layer, **Object Layer**, is where Python objects representing STIX 2
-data types (such as SDOs, SROs, and Cyber Observable Objects, as well as
-non-top-level objects like External References, Kill Chain phases, and Cyber
-Observable extensions) are created, and can be serialized and deserialized
-to and from JSON representation.
-
-This layer is appropriate for stand-alone scripts that produce or consume STIX
-2 content, or can serve as a low-level data API for larger applications that
-need to represent STIX objects as Python classes.
-
-At this level, non-embedded reference properties (those ending in ``_ref``, such
-as the links from a Relationship object to its source and target objects) are
-not implemented as references between the Python objects themselves, but by
-simply having the same values in ``id`` and reference properties. There is no
-referential integrity maintained by the ``stix2`` library.
-
-*This layer is mostly complete.*
-
-Environment Layer
-^^^^^^^^^^^^^^^^^
-
-The **Environment Layer** adds several components that make it easier to handle
-STIX 2 data as part of a larger application and as part of a larger cyber threat
-intelligence ecosystem.
-
-- ``Data Source``s represent locations from which STIX data can be retrieved,
- such as a TAXII server, database, or local filesystem. The Data Source API
- abstracts differences between these storage location, giving a common API to
- get objects by ID or query by various properties, as well as allowing
- federated operations over multiple data sources.
-- Similarly, ``Data Sink`` objects represent destinations for sending STIX data.
-- An ``Object Factory`` provides a way to add common properties to all created
- objects (such as the same ``created_by_ref``, or a ``StatementMarking`` with
- copyright information or terms of use for the STIX data).
-
-Each of these components can be used individually, or combined as part of an
-``Environment``. These ``Environment`` objects allow different settings to be
-used by different users of a multi-user application (such as a web application).
-
-*This layer is currently being developed.*
-
-Workbench Layer
-^^^^^^^^^^^^^^^
-
-The highest layer of the ``stix2`` APIs is the **Workbench Layer**, designed for
-a single user in a highly-interactive analytical environment (such as a `Jupyter
-Notebook <https://jupyter.org/>`_). It builds on the lower layers of the API,
-while hiding most of their complexity. Unlike the other layers, this layer is
-designed to be used directly by end users. For users who are comfortable with,
-Python, the Workbench Layer makes it easy to quickly interact with STIX data
-from a variety of sources without needing to write and run one-off Python
-scripts.
-
-*This layer has not yet been started.*
diff --git a/stix2/base.py b/stix2/base.py
index 7de193b..cb17f11 100644
--- a/stix2/base.py
+++ b/stix2/base.py
@@ -176,7 +176,7 @@ class _STIXBase(collections.Mapping):
if 'modified' not in kwargs:
kwargs['modified'] = get_timestamp()
else:
- new_modified_property = parse_into_datetime(kwargs['modified'], precision='millisecond')
+ new_modified_property = parse_into_datetime(kwargs['modified'])
if new_modified_property < self.modified:
raise InvalidValueError(cls, 'modified', "The new modified datetime cannot be before the current modified datatime.")
new_obj_inner.update(kwargs)
diff --git a/stix2/bundle.py b/stix2/bundle.py
index 85be3e1..b598ceb 100644
--- a/stix2/bundle.py
+++ b/stix2/bundle.py
@@ -17,6 +17,9 @@ class Bundle(_STIXBase):
def __init__(self, *args, **kwargs):
# Add any positional arguments to the 'objects' kwarg.
if args:
- kwargs['objects'] = kwargs.get('objects', []) + list(args)
+ if isinstance(args[0], list):
+ kwargs['objects'] = args[0] + list(args[1:]) + kwargs.get('objects', [])
+ else:
+ kwargs['objects'] = list(args) + kwargs.get('objects', [])
super(Bundle, self).__init__(**kwargs)
diff --git a/stix2/common.py b/stix2/common.py
index 7c6e747..c8c243d 100644
--- a/stix2/common.py
+++ b/stix2/common.py
@@ -7,8 +7,8 @@ from .utils import NOW
COMMON_PROPERTIES = {
# 'type' and 'id' should be defined on each individual type
- 'created': TimestampProperty(default=lambda: NOW, precision='millisecond'),
- 'modified': TimestampProperty(default=lambda: NOW, precision='millisecond'),
+ 'created': TimestampProperty(default=lambda: NOW),
+ 'modified': TimestampProperty(default=lambda: NOW),
'external_references': ListProperty(ExternalReference),
'revoked': BooleanProperty(),
'labels': ListProperty(StringProperty),
diff --git a/stix2/observables.py b/stix2/observables.py
index a8f3b67..086dc45 100644
--- a/stix2/observables.py
+++ b/stix2/observables.py
@@ -215,7 +215,7 @@ class WindowsPEBinaryExt(_Extension):
'imphash': StringProperty(),
'machine_hex': HexProperty(),
'number_of_sections': IntegerProperty(),
- 'time_date_stamp': TimestampProperty(precision='second'),
+ 'time_date_stamp': TimestampProperty(),
'pointer_to_symbol_table_hex': HexProperty(),
'number_of_symbols': IntegerProperty(),
'size_of_optional_header': IntegerProperty(),
diff --git a/stix2/properties.py b/stix2/properties.py
index 80e5345..9d54cf0 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -1,15 +1,18 @@
import base64
import binascii
import collections
+import datetime as dt
import inspect
import re
import uuid
+from dateutil import parser
+import pytz
from six import text_type
from .base import _Observable, _STIXBase
from .exceptions import DictionaryKeyError
-from .utils import get_dict, parse_into_datetime
+from .utils import get_dict
class Property(object):
@@ -212,12 +215,26 @@ class BooleanProperty(Property):
class TimestampProperty(Property):
- def __init__(self, precision=None, **kwargs):
- self.precision = precision
- super(TimestampProperty, self).__init__(**kwargs)
-
def clean(self, value):
- return parse_into_datetime(value, self.precision)
+ if isinstance(value, dt.date):
+ if hasattr(value, 'hour'):
+ return value
+ else:
+ # Add a time component
+ return dt.datetime.combine(value, dt.time(), tzinfo=pytz.utc)
+
+ # value isn't a date or datetime object so assume it's a string
+ try:
+ parsed = parser.parse(value)
+ except TypeError:
+ # Unknown format
+ raise ValueError("must be a datetime object, date object, or "
+ "timestamp string in a recognizable format.")
+ if parsed.tzinfo:
+ return parsed.astimezone(pytz.utc)
+ else:
+ # Doesn't have timezone info in the string; assume UTC
+ return pytz.utc.localize(parsed)
class ObservableProperty(Property):
diff --git a/stix2/utils.py b/stix2/utils.py
index 12b889c..bb41937 100644
--- a/stix2/utils.py
+++ b/stix2/utils.py
@@ -12,29 +12,15 @@ import pytz
NOW = object()
-class STIXdatetime(dt.datetime):
- def __new__(cls, *args, **kwargs):
- precision = kwargs.pop('precision', None)
- if isinstance(args[0], dt.datetime): # Allow passing in a datetime object
- dttm = args[0]
- args = (dttm.year, dttm.month, dttm.day, dttm.hour, dttm.minute,
- dttm.second, dttm.microsecond, dttm.tzinfo)
- # self will be an instance of STIXdatetime, not dt.datetime
- self = dt.datetime.__new__(cls, *args, **kwargs)
- self.precision = precision
- return self
-
-
def get_timestamp():
- return STIXdatetime.now(tz=pytz.UTC)
+ return dt.datetime.now(tz=pytz.UTC)
def format_datetime(dttm):
# 1. Convert to timezone-aware
# 2. Convert to UTC
# 3. Format in ISO format
- # 4. Ensure correct precision
- # 4a. Add subsecond value if non-zero and precision not defined
+ # 4. Add subsecond value if non-zero
# 5. Add "Z"
if dttm.tzinfo is None or dttm.tzinfo.utcoffset(dttm) is None:
@@ -43,53 +29,32 @@ def format_datetime(dttm):
else:
zoned = dttm.astimezone(pytz.utc)
ts = zoned.strftime("%Y-%m-%dT%H:%M:%S")
- ms = zoned.strftime("%f")
- precision = getattr(dttm, "precision", None)
- if precision == 'second':
- pass # Alredy precise to the second
- elif precision == "millisecond":
- ts = ts + '.' + ms[:3]
- elif zoned.microsecond > 0:
+ if zoned.microsecond > 0:
+ ms = zoned.strftime("%f")
ts = ts + '.' + ms.rstrip("0")
return ts + "Z"
-def parse_into_datetime(value, precision=None):
+def parse_into_datetime(value):
if isinstance(value, dt.date):
if hasattr(value, 'hour'):
- ts = value
+ return value
else:
# Add a time component
- ts = dt.datetime.combine(value, dt.time(0, 0, tzinfo=pytz.utc))
+ return dt.datetime.combine(value, dt.time(0, 0, tzinfo=pytz.utc))
+
+ # value isn't a date or datetime object so assume it's a string
+ try:
+ parsed = parser.parse(value)
+ except (TypeError, ValueError):
+ # Unknown format
+ raise ValueError("must be a datetime object, date object, or "
+ "timestamp string in a recognizable format.")
+ if parsed.tzinfo:
+ return parsed.astimezone(pytz.utc)
else:
- # value isn't a date or datetime object so assume it's a string
- try:
- parsed = parser.parse(value)
- except (TypeError, ValueError):
- # Unknown format
- raise ValueError("must be a datetime object, date object, or "
- "timestamp string in a recognizable format.")
- if parsed.tzinfo:
- ts = parsed.astimezone(pytz.utc)
- else:
- # Doesn't have timezone info in the string; assume UTC
- ts = pytz.utc.localize(parsed)
-
- # Ensure correct precision
- if not precision:
- return ts
- ms = ts.microsecond
- if precision == 'second':
- ts = ts.replace(microsecond=0)
- elif precision == 'millisecond':
- ms_len = len(str(ms))
- if ms_len > 3:
- # Truncate to millisecond precision
- factor = 10 ** (ms_len - 3)
- ts = ts.replace(microsecond=(ts.microsecond // factor) * factor)
- else:
- ts = ts.replace(microsecond=0)
- return STIXdatetime(ts, precision=precision)
+ # Doesn't have timezone info in the string; assume UTC
+ return pytz.utc.localize(parsed)
def get_dict(data):
| Passing a list to Bundle constructor creates list within a list
Currently, if you pass a list to the Bundle constructor, it creates a list within a list:
```json
{
"objects":
[
[
{ },
{ }
]
]
}
```
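A minimal reproduction sketch (the object kwargs are illustrative, borrowed from the project's test fixtures, so treat this as a sketch rather than documented usage):

```python
import stix2

indicator = stix2.Indicator(labels=["malicious-activity"],
                            pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']")
malware = stix2.Malware(labels=["ransomware"], name="Cryptolocker")

# Pre-fix, the positional list is appended as a single element, so `objects`
# ends up holding one nested list instead of the two objects themselves.
bundle = stix2.Bundle([indicator, malware])
print(bundle)
```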
If the first argument is a list, the constructor should just assign it to `objects`. | oasis-open/cti-python-stix2 | diff --git a/stix2/test/conftest.py b/stix2/test/conftest.py
index 9f61bc2..d1f3330 100644
--- a/stix2/test/conftest.py
+++ b/stix2/test/conftest.py
@@ -1,3 +1,4 @@
+import datetime as dt
import uuid
import pytest
@@ -12,12 +13,12 @@ from .constants import (FAKE_TIME, INDICATOR_KWARGS, MALWARE_KWARGS,
@pytest.fixture
def clock(monkeypatch):
- class mydatetime(stix2.utils.STIXdatetime):
+ class mydatetime(dt.datetime):
@classmethod
def now(cls, tz=None):
return FAKE_TIME
- monkeypatch.setattr(stix2.utils, 'STIXdatetime', mydatetime)
+ monkeypatch.setattr(dt, 'datetime', mydatetime)
@pytest.fixture
diff --git a/stix2/test/test_attack_pattern.py b/stix2/test/test_attack_pattern.py
index 5bd5af2..7510888 100644
--- a/stix2/test/test_attack_pattern.py
+++ b/stix2/test/test_attack_pattern.py
@@ -9,7 +9,7 @@ from .constants import ATTACK_PATTERN_ID
EXPECTED = """{
- "created": "2016-05-12T08:17:27.000Z",
+ "created": "2016-05-12T08:17:27Z",
"description": "...",
"external_references": [
{
@@ -18,7 +18,7 @@ EXPECTED = """{
}
],
"id": "attack-pattern--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
- "modified": "2016-05-12T08:17:27.000Z",
+ "modified": "2016-05-12T08:17:27Z",
"name": "Spear Phishing",
"type": "attack-pattern"
}"""
@@ -27,8 +27,8 @@ EXPECTED = """{
def test_attack_pattern_example():
ap = stix2.AttackPattern(
id="attack-pattern--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
- created="2016-05-12T08:17:27.000Z",
- modified="2016-05-12T08:17:27.000Z",
+ created="2016-05-12T08:17:27Z",
+ modified="2016-05-12T08:17:27Z",
name="Spear Phishing",
external_references=[{
"source_name": "capec",
@@ -45,8 +45,8 @@ def test_attack_pattern_example():
{
"type": "attack-pattern",
"id": "attack-pattern--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
- "created": "2016-05-12T08:17:27.000Z",
- "modified": "2016-05-12T08:17:27.000Z",
+ "created": "2016-05-12T08:17:27Z",
+ "modified": "2016-05-12T08:17:27Z",
"description": "...",
"external_references": [
{
diff --git a/stix2/test/test_bundle.py b/stix2/test/test_bundle.py
index fc3e350..9c2dd19 100644
--- a/stix2/test/test_bundle.py
+++ b/stix2/test/test_bundle.py
@@ -7,30 +7,30 @@ EXPECTED_BUNDLE = """{
"id": "bundle--00000000-0000-0000-0000-000000000004",
"objects": [
{
- "created": "2017-01-01T12:34:56.000Z",
+ "created": "2017-01-01T12:34:56Z",
"id": "indicator--00000000-0000-0000-0000-000000000001",
"labels": [
"malicious-activity"
],
- "modified": "2017-01-01T12:34:56.000Z",
+ "modified": "2017-01-01T12:34:56Z",
"pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"type": "indicator",
"valid_from": "2017-01-01T12:34:56Z"
},
{
- "created": "2017-01-01T12:34:56.000Z",
+ "created": "2017-01-01T12:34:56Z",
"id": "malware--00000000-0000-0000-0000-000000000002",
"labels": [
"ransomware"
],
- "modified": "2017-01-01T12:34:56.000Z",
+ "modified": "2017-01-01T12:34:56Z",
"name": "Cryptolocker",
"type": "malware"
},
{
- "created": "2017-01-01T12:34:56.000Z",
+ "created": "2017-01-01T12:34:56Z",
"id": "relationship--00000000-0000-0000-0000-000000000003",
- "modified": "2017-01-01T12:34:56.000Z",
+ "modified": "2017-01-01T12:34:56Z",
"relationship_type": "indicates",
"source_ref": "indicator--01234567-89ab-cdef-0123-456789abcdef",
"target_ref": "malware--fedcba98-7654-3210-fedc-ba9876543210",
@@ -92,3 +92,27 @@ def test_create_bundle_with_positional_args(indicator, malware, relationship):
bundle = stix2.Bundle(indicator, malware, relationship)
assert str(bundle) == EXPECTED_BUNDLE
+
+
+def test_create_bundle_with_positional_listarg(indicator, malware, relationship):
+ bundle = stix2.Bundle([indicator, malware, relationship])
+
+ assert str(bundle) == EXPECTED_BUNDLE
+
+
+def test_create_bundle_with_listarg_and_positional_arg(indicator, malware, relationship):
+ bundle = stix2.Bundle([indicator, malware], relationship)
+
+ assert str(bundle) == EXPECTED_BUNDLE
+
+
+def test_create_bundle_with_listarg_and_kwarg(indicator, malware, relationship):
+ bundle = stix2.Bundle([indicator, malware], objects=[relationship])
+
+ assert str(bundle) == EXPECTED_BUNDLE
+
+
+def test_create_bundle_with_arg_listarg_and_kwarg(indicator, malware, relationship):
+ bundle = stix2.Bundle([indicator], malware, objects=[relationship])
+
+ assert str(bundle) == EXPECTED_BUNDLE
diff --git a/stix2/test/test_campaign.py b/stix2/test/test_campaign.py
index 30b9444..9920019 100644
--- a/stix2/test/test_campaign.py
+++ b/stix2/test/test_campaign.py
@@ -9,11 +9,11 @@ from .constants import CAMPAIGN_ID
EXPECTED = """{
- "created": "2016-04-06T20:03:00.000Z",
+ "created": "2016-04-06T20:03:00Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "Campaign by Green Group against a series of targets in the financial services sector.",
"id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "modified": "2016-04-06T20:03:00.000Z",
+ "modified": "2016-04-06T20:03:00Z",
"name": "Green Group Attacks Against Finance",
"type": "campaign"
}"""
diff --git a/stix2/test/test_course_of_action.py b/stix2/test/test_course_of_action.py
index e7a1b22..263eae2 100644
--- a/stix2/test/test_course_of_action.py
+++ b/stix2/test/test_course_of_action.py
@@ -9,11 +9,11 @@ from .constants import COURSE_OF_ACTION_ID
EXPECTED = """{
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "This is how to add a filter rule to block inbound access to TCP port 80 to the existing UDP 1434 filter ...",
"id": "course-of-action--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Add TCP port 80 Filter Rule to the existing Block UDP 1434 Filter",
"type": "course-of-action"
}"""
@@ -23,8 +23,8 @@ def test_course_of_action_example():
coa = stix2.CourseOfAction(
id="course-of-action--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T20:03:48.000Z",
- modified="2016-04-06T20:03:48.000Z",
+ created="2016-04-06T20:03:48Z",
+ modified="2016-04-06T20:03:48Z",
name="Add TCP port 80 Filter Rule to the existing Block UDP 1434 Filter",
description="This is how to add a filter rule to block inbound access to TCP port 80 to the existing UDP 1434 filter ..."
)
@@ -35,11 +35,11 @@ def test_course_of_action_example():
@pytest.mark.parametrize("data", [
EXPECTED,
{
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "This is how to add a filter rule to block inbound access to TCP port 80 to the existing UDP 1434 filter ...",
"id": "course-of-action--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Add TCP port 80 Filter Rule to the existing Block UDP 1434 Filter",
"type": "course-of-action"
},
diff --git a/stix2/test/test_fixtures.py b/stix2/test/test_fixtures.py
index 83d5f85..9078972 100644
--- a/stix2/test/test_fixtures.py
+++ b/stix2/test/test_fixtures.py
@@ -1,12 +1,11 @@
+import datetime as dt
import uuid
-from stix2 import utils
-
from .constants import FAKE_TIME
def test_clock(clock):
- assert utils.STIXdatetime.now() == FAKE_TIME
+ assert dt.datetime.now() == FAKE_TIME
def test_my_uuid4_fixture(uuid4):
diff --git a/stix2/test/test_identity.py b/stix2/test/test_identity.py
index ed51958..b2c166c 100644
--- a/stix2/test/test_identity.py
+++ b/stix2/test/test_identity.py
@@ -9,10 +9,10 @@ from .constants import IDENTITY_ID
EXPECTED = """{
- "created": "2015-12-21T19:59:11.000Z",
+ "created": "2015-12-21T19:59:11Z",
"id": "identity--311b2d2d-f010-5473-83ec-1edf84858f4c",
"identity_class": "individual",
- "modified": "2015-12-21T19:59:11.000Z",
+ "modified": "2015-12-21T19:59:11Z",
"name": "John Smith",
"type": "identity"
}"""
@@ -21,8 +21,8 @@ EXPECTED = """{
def test_identity_example():
identity = stix2.Identity(
id="identity--311b2d2d-f010-5473-83ec-1edf84858f4c",
- created="2015-12-21T19:59:11.000Z",
- modified="2015-12-21T19:59:11.000Z",
+ created="2015-12-21T19:59:11Z",
+ modified="2015-12-21T19:59:11Z",
name="John Smith",
identity_class="individual",
)
@@ -33,10 +33,10 @@ def test_identity_example():
@pytest.mark.parametrize("data", [
EXPECTED,
{
- "created": "2015-12-21T19:59:11.000Z",
+ "created": "2015-12-21T19:59:11Z",
"id": "identity--311b2d2d-f010-5473-83ec-1edf84858f4c",
"identity_class": "individual",
- "modified": "2015-12-21T19:59:11.000Z",
+ "modified": "2015-12-21T19:59:11Z",
"name": "John Smith",
"type": "identity"
},
@@ -56,8 +56,8 @@ def test_parse_no_type():
stix2.parse("""
{
"id": "identity--311b2d2d-f010-5473-83ec-1edf84858f4c",
- "created": "2015-12-21T19:59:11.000Z",
- "modified": "2015-12-21T19:59:11.000Z",
+ "created": "2015-12-21T19:59:11Z",
+ "modified": "2015-12-21T19:59:11Z",
"name": "John Smith",
"identity_class": "individual"
}""")
diff --git a/stix2/test/test_indicator.py b/stix2/test/test_indicator.py
index 5daa0f6..5db50e6 100644
--- a/stix2/test/test_indicator.py
+++ b/stix2/test/test_indicator.py
@@ -10,22 +10,22 @@ from .constants import FAKE_TIME, INDICATOR_ID, INDICATOR_KWARGS
EXPECTED_INDICATOR = """{
- "created": "2017-01-01T00:00:01.000Z",
+ "created": "2017-01-01T00:00:01Z",
"id": "indicator--01234567-89ab-cdef-0123-456789abcdef",
"labels": [
"malicious-activity"
],
- "modified": "2017-01-01T00:00:01.000Z",
+ "modified": "2017-01-01T00:00:01Z",
"pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"type": "indicator",
"valid_from": "1970-01-01T00:00:01Z"
}"""
EXPECTED_INDICATOR_REPR = "Indicator(" + " ".join("""
- created=STIXdatetime(2017, 1, 1, 0, 0, 1, tzinfo=<UTC>),
+ created=datetime.datetime(2017, 1, 1, 0, 0, 1, tzinfo=<UTC>),
id='indicator--01234567-89ab-cdef-0123-456789abcdef',
labels=['malicious-activity'],
- modified=STIXdatetime(2017, 1, 1, 0, 0, 1, tzinfo=<UTC>),
+ modified=datetime.datetime(2017, 1, 1, 0, 0, 1, tzinfo=<UTC>),
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
type='indicator',
valid_from=datetime.datetime(1970, 1, 1, 0, 0, 1, tzinfo=<UTC>)
@@ -48,8 +48,6 @@ def test_indicator_with_all_required_properties():
assert str(ind) == EXPECTED_INDICATOR
rep = re.sub(r"(\[|=| )u('|\"|\\\'|\\\")", r"\g<1>\g<2>", repr(ind))
- print(rep)
- print(EXPECTED_INDICATOR_REPR)
assert rep == EXPECTED_INDICATOR_REPR
diff --git a/stix2/test/test_intrusion_set.py b/stix2/test/test_intrusion_set.py
index a6eee7f..3241ced 100644
--- a/stix2/test/test_intrusion_set.py
+++ b/stix2/test/test_intrusion_set.py
@@ -12,7 +12,7 @@ EXPECTED = """{
"aliases": [
"Zookeeper"
],
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "Incidents usually feature a shared TTP of a bobcat being released...",
"goals": [
@@ -21,7 +21,7 @@ EXPECTED = """{
"damage"
],
"id": "intrusion-set--4e78f46f-a023-4e5f-bc24-71b3ca22ec29",
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Bobcat Breakin",
"type": "intrusion-set"
}"""
@@ -31,8 +31,8 @@ def test_intrusion_set_example():
intrusion_set = stix2.IntrusionSet(
id="intrusion-set--4e78f46f-a023-4e5f-bc24-71b3ca22ec29",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T20:03:48.000Z",
- modified="2016-04-06T20:03:48.000Z",
+ created="2016-04-06T20:03:48Z",
+ modified="2016-04-06T20:03:48Z",
name="Bobcat Breakin",
description="Incidents usually feature a shared TTP of a bobcat being released...",
aliases=["Zookeeper"],
@@ -48,7 +48,7 @@ def test_intrusion_set_example():
"aliases": [
"Zookeeper"
],
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "Incidents usually feature a shared TTP of a bobcat being released...",
"goals": [
@@ -57,7 +57,7 @@ def test_intrusion_set_example():
"damage"
],
"id": "intrusion-set--4e78f46f-a023-4e5f-bc24-71b3ca22ec29",
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Bobcat Breakin",
"type": "intrusion-set"
},
diff --git a/stix2/test/test_malware.py b/stix2/test/test_malware.py
index ff0b394..266d012 100644
--- a/stix2/test/test_malware.py
+++ b/stix2/test/test_malware.py
@@ -10,12 +10,12 @@ from .constants import FAKE_TIME, MALWARE_ID, MALWARE_KWARGS
EXPECTED_MALWARE = """{
- "created": "2016-05-12T08:17:27.000Z",
+ "created": "2016-05-12T08:17:27Z",
"id": "malware--fedcba98-7654-3210-fedc-ba9876543210",
"labels": [
"ransomware"
],
- "modified": "2016-05-12T08:17:27.000Z",
+ "modified": "2016-05-12T08:17:27Z",
"name": "Cryptolocker",
"type": "malware"
}"""
@@ -109,8 +109,8 @@ def test_invalid_kwarg_to_malware():
{
"type": "malware",
"id": "malware--fedcba98-7654-3210-fedc-ba9876543210",
- "created": "2016-05-12T08:17:27.000Z",
- "modified": "2016-05-12T08:17:27.000Z",
+ "created": "2016-05-12T08:17:27Z",
+ "modified": "2016-05-12T08:17:27Z",
"labels": ["ransomware"],
"name": "Cryptolocker",
},
diff --git a/stix2/test/test_markings.py b/stix2/test/test_markings.py
index c2e0276..f1f07db 100644
--- a/stix2/test/test_markings.py
+++ b/stix2/test/test_markings.py
@@ -40,7 +40,7 @@ EXPECTED_GRANULAR_MARKING = """{
}"""
EXPECTED_CAMPAIGN_WITH_GRANULAR_MARKINGS = """{
- "created": "2016-04-06T20:03:00.000Z",
+ "created": "2016-04-06T20:03:00Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "Campaign by Green Group against a series of targets in the financial services sector.",
"granular_markings": [
@@ -52,7 +52,7 @@ EXPECTED_CAMPAIGN_WITH_GRANULAR_MARKINGS = """{
}
],
"id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
- "modified": "2016-04-06T20:03:00.000Z",
+ "modified": "2016-04-06T20:03:00Z",
"name": "Green Group Attacks Against Finance",
"type": "campaign"
}"""
diff --git a/stix2/test/test_observed_data.py b/stix2/test/test_observed_data.py
index 75f3070..0ed9954 100644
--- a/stix2/test/test_observed_data.py
+++ b/stix2/test/test_observed_data.py
@@ -10,12 +10,12 @@ from .constants import OBSERVED_DATA_ID
EXPECTED = """{
- "created": "2016-04-06T19:58:16.000Z",
+ "created": "2016-04-06T19:58:16Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"first_observed": "2015-12-21T19:00:00Z",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"last_observed": "2015-12-21T19:00:00Z",
- "modified": "2016-04-06T19:58:16.000Z",
+ "modified": "2016-04-06T19:58:16Z",
"number_observed": 50,
"objects": {
"0": {
@@ -31,8 +31,8 @@ def test_observed_data_example():
observed_data = stix2.ObservedData(
id="observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T19:58:16.000Z",
- modified="2016-04-06T19:58:16.000Z",
+ created="2016-04-06T19:58:16Z",
+ modified="2016-04-06T19:58:16Z",
first_observed="2015-12-21T19:00:00Z",
last_observed="2015-12-21T19:00:00Z",
number_observed=50,
@@ -48,12 +48,12 @@ def test_observed_data_example():
EXPECTED_WITH_REF = """{
- "created": "2016-04-06T19:58:16.000Z",
+ "created": "2016-04-06T19:58:16Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"first_observed": "2015-12-21T19:00:00Z",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"last_observed": "2015-12-21T19:00:00Z",
- "modified": "2016-04-06T19:58:16.000Z",
+ "modified": "2016-04-06T19:58:16Z",
"number_observed": 50,
"objects": {
"0": {
@@ -76,8 +76,8 @@ def test_observed_data_example_with_refs():
observed_data = stix2.ObservedData(
id="observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T19:58:16.000Z",
- modified="2016-04-06T19:58:16.000Z",
+ created="2016-04-06T19:58:16Z",
+ modified="2016-04-06T19:58:16Z",
first_observed="2015-12-21T19:00:00Z",
last_observed="2015-12-21T19:00:00Z",
number_observed=50,
@@ -102,8 +102,8 @@ def test_observed_data_example_with_bad_refs():
stix2.ObservedData(
id="observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T19:58:16.000Z",
- modified="2016-04-06T19:58:16.000Z",
+ created="2016-04-06T19:58:16Z",
+ modified="2016-04-06T19:58:16Z",
first_observed="2015-12-21T19:00:00Z",
last_observed="2015-12-21T19:00:00Z",
number_observed=50,
@@ -130,11 +130,11 @@ def test_observed_data_example_with_bad_refs():
{
"type": "observed-data",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
- "created": "2016-04-06T19:58:16.000Z",
+ "created": "2016-04-06T19:58:16Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"first_observed": "2015-12-21T19:00:00Z",
"last_observed": "2015-12-21T19:00:00Z",
- "modified": "2016-04-06T19:58:16.000Z",
+ "modified": "2016-04-06T19:58:16Z",
"number_observed": 50,
"objects": {
"0": {
@@ -466,12 +466,12 @@ def test_parse_basic_tcp_traffic_with_error(data):
EXPECTED_PROCESS_OD = """{
- "created": "2016-04-06T19:58:16.000Z",
+ "created": "2016-04-06T19:58:16Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"first_observed": "2015-12-21T19:00:00Z",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"last_observed": "2015-12-21T19:00:00Z",
- "modified": "2016-04-06T19:58:16.000Z",
+ "modified": "2016-04-06T19:58:16Z",
"number_observed": 50,
"objects": {
"0": {
@@ -499,8 +499,8 @@ def test_observed_data_with_process_example():
observed_data = stix2.ObservedData(
id="observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T19:58:16.000Z",
- modified="2016-04-06T19:58:16.000Z",
+ created="2016-04-06T19:58:16Z",
+ modified="2016-04-06T19:58:16Z",
first_observed="2015-12-21T19:00:00Z",
last_observed="2015-12-21T19:00:00Z",
number_observed=50,
diff --git a/stix2/test/test_relationship.py b/stix2/test/test_relationship.py
index 362348b..1ad792c 100644
--- a/stix2/test/test_relationship.py
+++ b/stix2/test/test_relationship.py
@@ -10,9 +10,9 @@ from .constants import (FAKE_TIME, INDICATOR_ID, MALWARE_ID, RELATIONSHIP_ID,
EXPECTED_RELATIONSHIP = """{
- "created": "2016-04-06T20:06:37.000Z",
+ "created": "2016-04-06T20:06:37Z",
"id": "relationship--00000000-1111-2222-3333-444444444444",
- "modified": "2016-04-06T20:06:37.000Z",
+ "modified": "2016-04-06T20:06:37Z",
"relationship_type": "indicates",
"source_ref": "indicator--01234567-89ab-cdef-0123-456789abcdef",
"target_ref": "malware--fedcba98-7654-3210-fedc-ba9876543210",
diff --git a/stix2/test/test_report.py b/stix2/test/test_report.py
index 4a1f905..cec217a 100644
--- a/stix2/test/test_report.py
+++ b/stix2/test/test_report.py
@@ -9,14 +9,14 @@ from .constants import INDICATOR_KWARGS, REPORT_ID
EXPECTED = """{
- "created": "2015-12-21T19:59:11.000Z",
+ "created": "2015-12-21T19:59:11Z",
"created_by_ref": "identity--a463ffb3-1bd9-4d94-b02d-74e4f1658283",
"description": "A simple report with an indicator and campaign",
"id": "report--84e4d88f-44ea-4bcd-bbf3-b2c1c320bcb3",
"labels": [
"campaign"
],
- "modified": "2015-12-21T19:59:11.000Z",
+ "modified": "2015-12-21T19:59:11Z",
"name": "The Black Vine Cyberespionage Group",
"object_refs": [
"indicator--26ffb872-1dd9-446e-b6f5-d58527e5b5d2",
@@ -32,8 +32,8 @@ def test_report_example():
report = stix2.Report(
id="report--84e4d88f-44ea-4bcd-bbf3-b2c1c320bcb3",
created_by_ref="identity--a463ffb3-1bd9-4d94-b02d-74e4f1658283",
- created="2015-12-21T19:59:11.000Z",
- modified="2015-12-21T19:59:11.000Z",
+ created="2015-12-21T19:59:11Z",
+ modified="2015-12-21T19:59:11Z",
name="The Black Vine Cyberespionage Group",
description="A simple report with an indicator and campaign",
published="2016-01-20T17:00:00Z",
@@ -95,14 +95,14 @@ def test_report_example_objects_in_object_refs_with_bad_id():
@pytest.mark.parametrize("data", [
EXPECTED,
{
- "created": "2015-12-21T19:59:11.000Z",
+ "created": "2015-12-21T19:59:11Z",
"created_by_ref": "identity--a463ffb3-1bd9-4d94-b02d-74e4f1658283",
"description": "A simple report with an indicator and campaign",
"id": "report--84e4d88f-44ea-4bcd-bbf3-b2c1c320bcb3",
"labels": [
"campaign"
],
- "modified": "2015-12-21T19:59:11.000Z",
+ "modified": "2015-12-21T19:59:11Z",
"name": "The Black Vine Cyberespionage Group",
"object_refs": [
"indicator--26ffb872-1dd9-446e-b6f5-d58527e5b5d2",
diff --git a/stix2/test/test_sighting.py b/stix2/test/test_sighting.py
index 2036457..6c9b7d8 100644
--- a/stix2/test/test_sighting.py
+++ b/stix2/test/test_sighting.py
@@ -9,9 +9,9 @@ from .constants import INDICATOR_ID, SIGHTING_ID, SIGHTING_KWARGS
EXPECTED_SIGHTING = """{
- "created": "2016-04-06T20:06:37.000Z",
+ "created": "2016-04-06T20:06:37Z",
"id": "sighting--bfbc19db-ec35-4e45-beed-f8bde2a772fb",
- "modified": "2016-04-06T20:06:37.000Z",
+ "modified": "2016-04-06T20:06:37Z",
"sighting_of_ref": "indicator--01234567-89ab-cdef-0123-456789abcdef",
"type": "sighting",
"where_sighted_refs": [
@@ -20,9 +20,9 @@ EXPECTED_SIGHTING = """{
}"""
BAD_SIGHTING = """{
- "created": "2016-04-06T20:06:37.000Z",
+ "created": "2016-04-06T20:06:37Z",
"id": "sighting--bfbc19db-ec35-4e45-beed-f8bde2a772fb",
- "modified": "2016-04-06T20:06:37.000Z",
+ "modified": "2016-04-06T20:06:37Z",
"sighting_of_ref": "indicator--01234567-89ab-cdef-0123-456789abcdef",
"type": "sighting",
"where_sighted_refs": [
diff --git a/stix2/test/test_threat_actor.py b/stix2/test/test_threat_actor.py
index 1bab744..93e8179 100644
--- a/stix2/test/test_threat_actor.py
+++ b/stix2/test/test_threat_actor.py
@@ -9,14 +9,14 @@ from .constants import THREAT_ACTOR_ID
EXPECTED = """{
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "The Evil Org threat actor group",
"id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"labels": [
"crime-syndicate"
],
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Evil Org",
"type": "threat-actor"
}"""
@@ -26,8 +26,8 @@ def test_threat_actor_example():
threat_actor = stix2.ThreatActor(
id="threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T20:03:48.000Z",
- modified="2016-04-06T20:03:48.000Z",
+ created="2016-04-06T20:03:48Z",
+ modified="2016-04-06T20:03:48Z",
name="Evil Org",
description="The Evil Org threat actor group",
labels=["crime-syndicate"],
@@ -39,14 +39,14 @@ def test_threat_actor_example():
@pytest.mark.parametrize("data", [
EXPECTED,
{
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"description": "The Evil Org threat actor group",
"id": "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"labels": [
"crime-syndicate"
],
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "Evil Org",
"type": "threat-actor"
},
diff --git a/stix2/test/test_tool.py b/stix2/test/test_tool.py
index 04da7b3..d7d3bef 100644
--- a/stix2/test/test_tool.py
+++ b/stix2/test/test_tool.py
@@ -9,13 +9,13 @@ from .constants import TOOL_ID
EXPECTED = """{
- "created": "2016-04-06T20:03:48.000Z",
+ "created": "2016-04-06T20:03:48Z",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"id": "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"labels": [
"remote-access"
],
- "modified": "2016-04-06T20:03:48.000Z",
+ "modified": "2016-04-06T20:03:48Z",
"name": "VNC",
"type": "tool"
}"""
@@ -25,8 +25,8 @@ def test_tool_example():
tool = stix2.Tool(
id="tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
- created="2016-04-06T20:03:48.000Z",
- modified="2016-04-06T20:03:48.000Z",
+ created="2016-04-06T20:03:48Z",
+ modified="2016-04-06T20:03:48Z",
name="VNC",
labels=["remote-access"],
)
diff --git a/stix2/test/test_utils.py b/stix2/test/test_utils.py
index c73bcd2..dbc0ed5 100644
--- a/stix2/test/test_utils.py
+++ b/stix2/test/test_utils.py
@@ -17,8 +17,6 @@ eastern = pytz.timezone('US/Eastern')
(eastern.localize(dt.datetime(2017, 7, 1)), '2017-07-01T04:00:00Z'),
(dt.datetime(2017, 7, 1), '2017-07-01T00:00:00Z'),
(dt.datetime(2017, 7, 1, 0, 0, 0, 1), '2017-07-01T00:00:00.000001Z'),
- (stix2.utils.STIXdatetime(2017, 7, 1, 0, 0, 0, 1, precision='millisecond'), '2017-07-01T00:00:00.000Z'),
- (stix2.utils.STIXdatetime(2017, 7, 1, 0, 0, 0, 1, precision='second'), '2017-07-01T00:00:00Z'),
])
def test_timestamp_formatting(dttm, timestamp):
assert stix2.utils.format_datetime(dttm) == timestamp
@@ -35,17 +33,6 @@ def test_parse_datetime(timestamp, dttm):
assert stix2.utils.parse_into_datetime(timestamp) == dttm
[email protected]('timestamp, dttm, precision', [
- ('2017-01-01T01:02:03.000001', dt.datetime(2017, 1, 1, 1, 2, 3, 0, tzinfo=pytz.utc), 'millisecond'),
- ('2017-01-01T01:02:03.001', dt.datetime(2017, 1, 1, 1, 2, 3, 1000, tzinfo=pytz.utc), 'millisecond'),
- ('2017-01-01T01:02:03.1', dt.datetime(2017, 1, 1, 1, 2, 3, 100000, tzinfo=pytz.utc), 'millisecond'),
- ('2017-01-01T01:02:03.45', dt.datetime(2017, 1, 1, 1, 2, 3, 450000, tzinfo=pytz.utc), 'millisecond'),
- ('2017-01-01T01:02:03.45', dt.datetime(2017, 1, 1, 1, 2, 3, tzinfo=pytz.utc), 'second'),
-])
-def test_parse_datetime_precision(timestamp, dttm, precision):
- assert stix2.utils.parse_into_datetime(timestamp, precision) == dttm
-
-
@pytest.mark.parametrize('ts', [
'foobar',
1,
diff --git a/stix2/test/test_vulnerability.py b/stix2/test/test_vulnerability.py
index 27ab85f..751460c 100644
--- a/stix2/test/test_vulnerability.py
+++ b/stix2/test/test_vulnerability.py
@@ -9,7 +9,7 @@ from .constants import VULNERABILITY_ID
EXPECTED = """{
- "created": "2016-05-12T08:17:27.000Z",
+ "created": "2016-05-12T08:17:27Z",
"external_references": [
{
"external_id": "CVE-2016-1234",
@@ -17,7 +17,7 @@ EXPECTED = """{
}
],
"id": "vulnerability--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
- "modified": "2016-05-12T08:17:27.000Z",
+ "modified": "2016-05-12T08:17:27Z",
"name": "CVE-2016-1234",
"type": "vulnerability"
}"""
@@ -26,8 +26,8 @@ EXPECTED = """{
def test_vulnerability_example():
vulnerability = stix2.Vulnerability(
id="vulnerability--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
- created="2016-05-12T08:17:27.000Z",
- modified="2016-05-12T08:17:27.000Z",
+ created="2016-05-12T08:17:27Z",
+ modified="2016-05-12T08:17:27Z",
name="CVE-2016-1234",
external_references=[
stix2.ExternalReference(source_name='cve',
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 7
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@f8e3a4f0e895da95fbef109041dec26d6f968690#egg=stix2
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_attack_pattern.py::test_attack_pattern_example",
"stix2/test/test_bundle.py::test_create_bundle",
"stix2/test/test_bundle.py::test_create_bundle_with_positional_args",
"stix2/test/test_bundle.py::test_create_bundle_with_positional_listarg",
"stix2/test/test_bundle.py::test_create_bundle_with_listarg_and_positional_arg",
"stix2/test/test_bundle.py::test_create_bundle_with_listarg_and_kwarg",
"stix2/test/test_bundle.py::test_create_bundle_with_arg_listarg_and_kwarg",
"stix2/test/test_campaign.py::test_campaign_example",
"stix2/test/test_course_of_action.py::test_course_of_action_example",
"stix2/test/test_identity.py::test_identity_example",
"stix2/test/test_indicator.py::test_indicator_with_all_required_properties",
"stix2/test/test_indicator.py::test_indicator_autogenerated_properties",
"stix2/test/test_indicator.py::test_cannot_assign_to_indicator_attributes",
"stix2/test/test_intrusion_set.py::test_intrusion_set_example",
"stix2/test/test_malware.py::test_malware_with_all_required_properties",
"stix2/test/test_malware.py::test_malware_autogenerated_properties",
"stix2/test/test_malware.py::test_cannot_assign_to_malware_attributes",
"stix2/test/test_markings.py::test_campaign_with_granular_markings_example",
"stix2/test/test_observed_data.py::test_observed_data_example",
"stix2/test/test_observed_data.py::test_observed_data_example_with_refs",
"stix2/test/test_relationship.py::test_relationship_all_required_properties",
"stix2/test/test_relationship.py::test_relationship_autogenerated_properties",
"stix2/test/test_relationship.py::test_cannot_assign_to_relationship_attributes",
"stix2/test/test_relationship.py::test_create_relationship_from_objects_rather_than_ids",
"stix2/test/test_relationship.py::test_create_relationship_with_positional_args",
"stix2/test/test_report.py::test_report_example",
"stix2/test/test_report.py::test_report_example_objects_in_object_refs",
"stix2/test/test_sighting.py::test_sighting_all_required_properties",
"stix2/test/test_sighting.py::test_create_sighting_from_objects_rather_than_ids",
"stix2/test/test_threat_actor.py::test_threat_actor_example",
"stix2/test/test_tool.py::test_tool_example",
"stix2/test/test_vulnerability.py::test_vulnerability_example"
]
| []
| [
"stix2/test/test_attack_pattern.py::test_parse_attack_pattern[{\\n",
"stix2/test/test_attack_pattern.py::test_parse_attack_pattern[data1]",
"stix2/test/test_attack_pattern.py::test_attack_pattern_invalid_labels",
"stix2/test/test_bundle.py::test_empty_bundle",
"stix2/test/test_bundle.py::test_bundle_with_wrong_type",
"stix2/test/test_bundle.py::test_bundle_id_must_start_with_bundle",
"stix2/test/test_bundle.py::test_bundle_with_wrong_spec_version",
"stix2/test/test_campaign.py::test_parse_campaign[{\\n",
"stix2/test/test_campaign.py::test_parse_campaign[data1]",
"stix2/test/test_course_of_action.py::test_parse_course_of_action[{\\n",
"stix2/test/test_course_of_action.py::test_parse_course_of_action[data1]",
"stix2/test/test_fixtures.py::test_clock",
"stix2/test/test_fixtures.py::test_my_uuid4_fixture",
"stix2/test/test_identity.py::test_parse_identity[{\\n",
"stix2/test/test_identity.py::test_parse_identity[data1]",
"stix2/test/test_identity.py::test_parse_no_type",
"stix2/test/test_indicator.py::test_indicator_type_must_be_indicator",
"stix2/test/test_indicator.py::test_indicator_id_must_start_with_indicator",
"stix2/test/test_indicator.py::test_indicator_required_properties",
"stix2/test/test_indicator.py::test_indicator_required_property_pattern",
"stix2/test/test_indicator.py::test_indicator_created_ref_invalid_format",
"stix2/test/test_indicator.py::test_indicator_revoked_invalid",
"stix2/test/test_indicator.py::test_invalid_kwarg_to_indicator",
"stix2/test/test_indicator.py::test_created_modified_time_are_identical_by_default",
"stix2/test/test_indicator.py::test_parse_indicator[{\\n",
"stix2/test/test_indicator.py::test_parse_indicator[data1]",
"stix2/test/test_intrusion_set.py::test_parse_intrusion_set[{\\n",
"stix2/test/test_intrusion_set.py::test_parse_intrusion_set[data1]",
"stix2/test/test_malware.py::test_malware_type_must_be_malware",
"stix2/test/test_malware.py::test_malware_id_must_start_with_malware",
"stix2/test/test_malware.py::test_malware_required_properties",
"stix2/test/test_malware.py::test_malware_required_property_name",
"stix2/test/test_malware.py::test_invalid_kwarg_to_malware",
"stix2/test/test_malware.py::test_parse_malware[{\\n",
"stix2/test/test_malware.py::test_parse_malware[data1]",
"stix2/test/test_malware.py::test_parse_malware_invalid_labels",
"stix2/test/test_malware.py::test_parse_malware_kill_chain_phases",
"stix2/test/test_malware.py::test_parse_malware_clean_kill_chain_phases",
"stix2/test/test_markings.py::test_marking_def_example_with_tlp",
"stix2/test/test_markings.py::test_marking_def_example_with_statement",
"stix2/test/test_markings.py::test_marking_def_example_with_positional_statement",
"stix2/test/test_markings.py::test_granular_example",
"stix2/test/test_markings.py::test_granular_example_with_bad_selector",
"stix2/test/test_markings.py::test_parse_marking_definition[{\\n",
"stix2/test/test_markings.py::test_parse_marking_definition[data1]",
"stix2/test/test_observed_data.py::test_observed_data_example_with_bad_refs",
"stix2/test/test_observed_data.py::test_parse_observed_data[{\\n",
"stix2/test/test_observed_data.py::test_parse_observed_data[data1]",
"stix2/test/test_observed_data.py::test_parse_artifact_valid[\"0\":",
"stix2/test/test_observed_data.py::test_parse_artifact_invalid[\"0\":",
"stix2/test/test_observed_data.py::test_artifact_example_dependency_error",
"stix2/test/test_observed_data.py::test_parse_autonomous_system_valid[\"0\":",
"stix2/test/test_observed_data.py::test_parse_email_address[{\\n",
"stix2/test/test_observed_data.py::test_parse_email_message[\\n",
"stix2/test/test_observed_data.py::test_parse_email_message_not_multipart[\\n",
"stix2/test/test_observed_data.py::test_parse_file_archive[\"0\":",
"stix2/test/test_observed_data.py::test_parse_email_message_with_at_least_one_error[\\n",
"stix2/test/test_observed_data.py::test_parse_basic_tcp_traffic[\\n",
"stix2/test/test_observed_data.py::test_parse_basic_tcp_traffic_with_error[\\n",
"stix2/test/test_observed_data.py::test_observed_data_with_process_example",
"stix2/test/test_observed_data.py::test_artifact_example",
"stix2/test/test_observed_data.py::test_artifact_mutual_exclusion_error",
"stix2/test/test_observed_data.py::test_directory_example",
"stix2/test/test_observed_data.py::test_directory_example_ref_error",
"stix2/test/test_observed_data.py::test_domain_name_example",
"stix2/test/test_observed_data.py::test_domain_name_example_invalid_ref_type",
"stix2/test/test_observed_data.py::test_file_example",
"stix2/test/test_observed_data.py::test_file_example_with_NTFSExt",
"stix2/test/test_observed_data.py::test_file_example_with_empty_NTFSExt",
"stix2/test/test_observed_data.py::test_file_example_with_PDFExt",
"stix2/test/test_observed_data.py::test_file_example_with_PDFExt_Object",
"stix2/test/test_observed_data.py::test_file_example_with_RasterImageExt_Object",
"stix2/test/test_observed_data.py::test_file_example_with_WindowsPEBinaryExt",
"stix2/test/test_observed_data.py::test_file_example_encryption_error",
"stix2/test/test_observed_data.py::test_ip4_address_example",
"stix2/test/test_observed_data.py::test_ip4_address_example_cidr",
"stix2/test/test_observed_data.py::test_ip6_address_example",
"stix2/test/test_observed_data.py::test_mac_address_example",
"stix2/test/test_observed_data.py::test_network_traffic_example",
"stix2/test/test_observed_data.py::test_network_traffic_http_request_example",
"stix2/test/test_observed_data.py::test_network_traffic_icmp_example",
"stix2/test/test_observed_data.py::test_network_traffic_socket_example",
"stix2/test/test_observed_data.py::test_network_traffic_tcp_example",
"stix2/test/test_observed_data.py::test_mutex_example",
"stix2/test/test_observed_data.py::test_process_example",
"stix2/test/test_observed_data.py::test_process_example_empty_error",
"stix2/test/test_observed_data.py::test_process_example_empty_with_extensions",
"stix2/test/test_observed_data.py::test_process_example_windows_process_ext",
"stix2/test/test_observed_data.py::test_process_example_windows_process_ext_empty",
"stix2/test/test_observed_data.py::test_process_example_extensions_empty",
"stix2/test/test_observed_data.py::test_process_example_with_WindowsProcessExt_Object",
"stix2/test/test_observed_data.py::test_process_example_with_WindowsServiceExt",
"stix2/test/test_observed_data.py::test_process_example_with_WindowsProcessServiceExt",
"stix2/test/test_observed_data.py::test_software_example",
"stix2/test/test_observed_data.py::test_url_example",
"stix2/test/test_observed_data.py::test_user_account_example",
"stix2/test/test_observed_data.py::test_user_account_unix_account_ext_example",
"stix2/test/test_observed_data.py::test_windows_registry_key_example",
"stix2/test/test_observed_data.py::test_x509_certificate_example",
"stix2/test/test_relationship.py::test_relationship_type_must_be_relationship",
"stix2/test/test_relationship.py::test_relationship_id_must_start_with_relationship",
"stix2/test/test_relationship.py::test_relationship_required_property_relationship_type",
"stix2/test/test_relationship.py::test_relationship_missing_some_required_properties",
"stix2/test/test_relationship.py::test_relationship_required_properties_target_ref",
"stix2/test/test_relationship.py::test_invalid_kwarg_to_relationship",
"stix2/test/test_relationship.py::test_parse_relationship[{\\n",
"stix2/test/test_relationship.py::test_parse_relationship[data1]",
"stix2/test/test_report.py::test_report_example_objects_in_object_refs_with_bad_id",
"stix2/test/test_report.py::test_parse_report[{\\n",
"stix2/test/test_report.py::test_parse_report[data1]",
"stix2/test/test_sighting.py::test_sighting_bad_where_sighted_refs",
"stix2/test/test_sighting.py::test_sighting_type_must_be_sightings",
"stix2/test/test_sighting.py::test_invalid_kwarg_to_sighting",
"stix2/test/test_sighting.py::test_parse_sighting[{\\n",
"stix2/test/test_sighting.py::test_parse_sighting[data1]",
"stix2/test/test_threat_actor.py::test_parse_threat_actor[{\\n",
"stix2/test/test_threat_actor.py::test_parse_threat_actor[data1]",
"stix2/test/test_tool.py::test_parse_tool[{\\n",
"stix2/test/test_tool.py::test_parse_tool[data1]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm0-2017-01-01T00:00:00Z]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm1-2016-12-31T23:00:00Z]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm2-2017-01-01T17:34:56Z]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm3-2017-07-01T04:00:00Z]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm4-2017-07-01T00:00:00Z]",
"stix2/test/test_utils.py::test_timestamp_formatting[dttm5-2017-07-01T00:00:00.000001Z]",
"stix2/test/test_utils.py::test_parse_datetime[timestamp0-dttm0]",
"stix2/test/test_utils.py::test_parse_datetime[timestamp1-dttm1]",
"stix2/test/test_utils.py::test_parse_datetime[2017-01-01T00:00:00Z-dttm2]",
"stix2/test/test_utils.py::test_parse_datetime[2017-01-01T02:00:00+2:00-dttm3]",
"stix2/test/test_utils.py::test_parse_datetime[2017-01-01T00:00:00-dttm4]",
"stix2/test/test_utils.py::test_parse_datetime_invalid[foobar]",
"stix2/test/test_utils.py::test_parse_datetime_invalid[1]",
"stix2/test/test_utils.py::test_get_dict[data0]",
"stix2/test/test_utils.py::test_get_dict[{\"a\":",
"stix2/test/test_utils.py::test_get_dict[data2]",
"stix2/test/test_utils.py::test_get_dict[data3]",
"stix2/test/test_utils.py::test_get_dict_invalid[1]",
"stix2/test/test_utils.py::test_get_dict_invalid[data1]",
"stix2/test/test_utils.py::test_get_dict_invalid[data2]",
"stix2/test/test_utils.py::test_get_dict_invalid[foobar]",
"stix2/test/test_vulnerability.py::test_parse_vulnerability[{\\n",
"stix2/test/test_vulnerability.py::test_parse_vulnerability[data1]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,434 | [
"stix2/base.py",
"stix2/common.py",
"stix2/bundle.py",
"stix2/properties.py",
"docs/overview.rst",
"stix2/observables.py",
"stix2/utils.py"
]
| [
"stix2/base.py",
"stix2/common.py",
"stix2/bundle.py",
"stix2/properties.py",
"docs/overview.rst",
"stix2/observables.py",
"stix2/utils.py"
]
|
|
jupyter__nbgrader-782 | 1af1162f65ab38a7e4d55f1c0c1daf55ff8f5250 | 2017-07-05 18:50:27 | ed23f4484b084451da5b691df28031f39b2ce9ca | dsblank: Great news! I look forward to trying this out!
lgpage: @jhamrick this really really looks awesome :D
I get an error if I navigate `Gradebook tab -> ps1 -> problem1` without ps1 having been assigned first under `Manage Assignments`, although this may be a buggy env on my side. This was tried out with only the files created from `nbgrader quickstart`.
I think maybe as a future PR we could add a way to view the log files.
jhamrick: @lgpage Good catch, thanks!
jhamrick: Thanks @willingc for the review! 😄 Sorry there's so much there to go through... if it helps, the high-level changes that I made were to make the webapp source the data for the tables through ajax, using an MVC pattern through Backbone. I made an earlier PR ( #780 ) which created a high-level API that the REST API then accesses for actually querying and modifying the data. | diff --git a/nbgrader/apps/api.py b/nbgrader/apps/api.py
index bdc8a545..1dd939f1 100644
--- a/nbgrader/apps/api.py
+++ b/nbgrader/apps/api.py
@@ -6,13 +6,13 @@ import six
import logging
from traitlets.config import LoggingConfigurable
-from traitlets import Instance, Enum, observe
+from traitlets import Instance, Enum, Unicode, observe
from ..coursedir import CourseDirectory
from ..converters import Assign, Autograde
from ..exchange import ExchangeList, ExchangeRelease, ExchangeCollect
from ..api import MissingEntry, Gradebook
-from ..utils import parse_utc, temp_attrs, capture_log
+from ..utils import parse_utc, temp_attrs, capture_log, as_timezone
class NbGraderAPI(LoggingConfigurable):
@@ -27,6 +27,16 @@ class NbGraderAPI(LoggingConfigurable):
help="Set the log level by value or name."
).tag(config=True)
+ timezone = Unicode(
+ "UTC",
+ help="Timezone for displaying timestamps"
+ ).tag(config=True)
+
+ timestamp_format = Unicode(
+ "%Y-%m-%d %H:%M:%S %Z",
+ help="Format string for displaying timestamps"
+ ).tag(config=True)
+
@observe('log_level')
def _log_level_changed(self, change):
"""Adjust the log level when log_level is set."""
@@ -284,7 +294,16 @@ class NbGraderAPI(LoggingConfigurable):
# see if there is information about the assignment in the database
try:
with self.gradebook as gb:
- assignment = gb.find_assignment(assignment_id).to_dict()
+ db_assignment = gb.find_assignment(assignment_id)
+ assignment = db_assignment.to_dict()
+ if db_assignment.duedate:
+ ts = as_timezone(db_assignment.duedate, self.timezone)
+ assignment["display_duedate"] = ts.strftime(self.timestamp_format)
+ assignment["duedate_notimezone"] = ts.replace(tzinfo=None).isoformat()
+ else:
+ assignment["display_duedate"] = None
+ assignment["duedate_notimezone"] = None
+ assignment["duedate_timezone"] = self.timezone
assignment["average_score"] = gb.average_assignment_score(assignment_id)
assignment["average_code_score"] = gb.average_assignment_code_score(assignment_id)
assignment["average_written_score"] = gb.average_assignment_written_score(assignment_id)
@@ -294,6 +313,9 @@ class NbGraderAPI(LoggingConfigurable):
"id": None,
"name": assignment_id,
"duedate": None,
+ "display_duedate": None,
+ "duedate_notimezone": None,
+ "duedate_timezone": self.timezone,
"average_score": 0,
"average_code_score": 0,
"average_written_score": 0,
@@ -370,7 +392,7 @@ class NbGraderAPI(LoggingConfigurable):
assignment = None
# if the assignment exists in the database
- if assignment:
+ if assignment and assignment.notebooks:
notebooks = []
for notebook in assignment.notebooks:
x = notebook.to_dict()
@@ -435,11 +457,19 @@ class NbGraderAPI(LoggingConfigurable):
ungraded = self.get_submitted_students(assignment_id) - autograded
if student_id in ungraded:
- timestamp = self.get_submitted_timestamp(assignment_id, student_id).isoformat()
+ ts = self.get_submitted_timestamp(assignment_id, student_id)
+ if ts:
+ timestamp = ts.isoformat()
+ display_timestamp = as_timezone(ts, self.timezone).strftime(self.timestamp_format)
+ else:
+ timestamp = None
+ display_timestamp = None
+
submission = {
"id": None,
"name": assignment_id,
"timestamp": timestamp,
+ "display_timestamp": display_timestamp,
"score": 0.0,
"max_score": 0.0,
"code_score": 0.0,
@@ -465,7 +495,10 @@ class NbGraderAPI(LoggingConfigurable):
elif student_id in autograded:
with self.gradebook as gb:
try:
- submission = gb.find_submission(assignment_id, student_id).to_dict()
+ db_submission = gb.find_submission(assignment_id, student_id)
+ submission = db_submission.to_dict()
+ submission["display_timestamp"] = as_timezone(
+ db_submission.timestamp, self.timezone).strftime(self.timestamp_format)
except MissingEntry:
return None
@@ -477,6 +510,7 @@ class NbGraderAPI(LoggingConfigurable):
"id": None,
"name": assignment_id,
"timestamp": None,
+ "display_timestamp": None,
"score": 0.0,
"max_score": 0.0,
"code_score": 0.0,
@@ -523,7 +557,14 @@ class NbGraderAPI(LoggingConfigurable):
for submission in db_submissions:
if submission["student"] in ungraded:
continue
- submission["timestamp"] = submission["timestamp"].isoformat()
+ ts = submission["timestamp"]
+ if ts:
+ submission["timestamp"] = ts.isoformat()
+ submission["display_timestamp"] = as_timezone(
+ ts, self.timezone).strftime(self.timestamp_format)
+ else:
+ submission["timestamp"] = None
+ submission["display_timestamp"] = None
submission["autograded"] = True
submission["submitted"] = True
submissions.append(submission)
@@ -608,8 +649,13 @@ class NbGraderAPI(LoggingConfigurable):
"""
with self.gradebook as gb:
- gb.find_notebook(notebook_id, assignment_id)
+ try:
+ gb.find_notebook(notebook_id, assignment_id)
+ except MissingEntry:
+ return []
+
submissions = gb.notebook_submission_dicts(notebook_id, assignment_id)
+
indices = self.get_notebook_submission_indices(assignment_id, notebook_id)
for nb in submissions:
nb['index'] = indices.get(nb['id'], None)
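(For orientation: the `NbGraderAPI` changes in the hunk above belong to the high-level API mentioned in the discussion, which the REST handlers further down call into. A minimal sketch of driving it directly from Python follows. It is illustrative only, not part of this patch; the course root, trait values, and printed field names are assumptions inferred from the surrounding hunks.)

```python
# Illustrative sketch only (not part of this patch). Assumes nbgrader is
# installed and the current working directory is a course directory; the
# printed field names follow the dicts constructed in the hunk above.
from nbgrader.coursedir import CourseDirectory
from nbgrader.apps.api import NbGraderAPI

coursedir = CourseDirectory(root=".")          # course root is an assumption
api = NbGraderAPI(coursedir)
api.timezone = "UTC"                           # new trait added above
api.timestamp_format = "%Y-%m-%d %H:%M:%S %Z"  # new trait added above

# List assignments with the new display-friendly due dates.
for assignment in api.get_assignments():
    print(assignment["name"], assignment["display_duedate"])
```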
diff --git a/nbgrader/server_extensions/formgrader/apihandlers.py b/nbgrader/server_extensions/formgrader/apihandlers.py
index 377eb351..e49d2e83 100644
--- a/nbgrader/server_extensions/formgrader/apihandlers.py
+++ b/nbgrader/server_extensions/formgrader/apihandlers.py
@@ -1,13 +1,16 @@
import json
+import os
+import traceback
from tornado import web
-from .base import BaseApiHandler
+from .base import BaseApiHandler, check_xsrf
from ...api import MissingEntry
class GradeCollectionHandler(BaseApiHandler):
@web.authenticated
+ @check_xsrf
def get(self):
submission_id = self.get_argument("submission_id")
try:
@@ -19,6 +22,7 @@ class GradeCollectionHandler(BaseApiHandler):
class CommentCollectionHandler(BaseApiHandler):
@web.authenticated
+ @check_xsrf
def get(self):
submission_id = self.get_argument("submission_id")
try:
@@ -30,6 +34,7 @@ class CommentCollectionHandler(BaseApiHandler):
class GradeHandler(BaseApiHandler):
@web.authenticated
+ @check_xsrf
def get(self, grade_id):
try:
grade = self.gradebook.find_grade_by_id(grade_id)
@@ -38,6 +43,7 @@ class GradeHandler(BaseApiHandler):
self.write(json.dumps(grade.to_dict()))
@web.authenticated
+ @check_xsrf
def put(self, grade_id):
try:
grade = self.gradebook.find_grade_by_id(grade_id)
@@ -57,6 +63,7 @@ class GradeHandler(BaseApiHandler):
class CommentHandler(BaseApiHandler):
@web.authenticated
+ @check_xsrf
def get(self, grade_id):
try:
comment = self.gradebook.find_comment_by_id(grade_id)
@@ -65,6 +72,7 @@ class CommentHandler(BaseApiHandler):
self.write(json.dumps(comment.to_dict()))
@web.authenticated
+ @check_xsrf
def put(self, grade_id):
try:
comment = self.gradebook.find_comment_by_id(grade_id)
@@ -79,6 +87,7 @@ class CommentHandler(BaseApiHandler):
class FlagSubmissionHandler(BaseApiHandler):
@web.authenticated
+ @check_xsrf
def post(self, submission_id):
try:
submission = self.gradebook.find_submission_notebook_by_id(submission_id)
@@ -90,10 +99,180 @@ class FlagSubmissionHandler(BaseApiHandler):
self.write(json.dumps(submission.to_dict()))
+class AssignmentCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self):
+ assignments = self.api.get_assignments()
+ self.write(json.dumps(assignments))
+
+
+class AssignmentHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, assignment_id):
+ assignment = self.api.get_assignment(assignment_id)
+ if assignment is None:
+ raise web.HTTPError(404)
+ self.write(json.dumps(assignment))
+
+ @web.authenticated
+ @check_xsrf
+ def put(self, assignment_id):
+ data = self.get_json_body()
+ duedate = data.get("duedate_notimezone", None)
+ timezone = data.get("duedate_timezone", None)
+ if duedate and timezone:
+ duedate = duedate + " " + timezone
+ assignment = {"duedate": duedate}
+ self.gradebook.update_or_create_assignment(assignment_id, **assignment)
+ sourcedir = os.path.abspath(self.coursedir.format_path(self.coursedir.source_directory, '.', assignment_id))
+ if not os.path.isdir(sourcedir):
+ os.makedirs(sourcedir)
+ self.write(json.dumps(self.api.get_assignment(assignment_id)))
+
+
+class NotebookCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, assignment_id):
+ notebooks = self.api.get_notebooks(assignment_id)
+ self.write(json.dumps(notebooks))
+
+
+class SubmissionCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, assignment_id):
+ submissions = self.api.get_submissions(assignment_id)
+ self.write(json.dumps(submissions))
+
+
+class SubmissionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, assignment_id, student_id):
+ submission = self.api.get_submission(assignment_id, student_id)
+ if submission is None:
+ raise web.HTTPError(404)
+ self.write(json.dumps(submission))
+
+
+class SubmittedNotebookCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, assignment_id, notebook_id):
+ submissions = self.api.get_notebook_submissions(assignment_id, notebook_id)
+ self.write(json.dumps(submissions))
+
+
+class StudentCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self):
+ students = self.api.get_students()
+ self.write(json.dumps(students))
+
+
+class StudentHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, student_id):
+ student = self.api.get_student(student_id)
+ if student is None:
+ raise web.HTTPError(404)
+ self.write(json.dumps(student))
+
+ @web.authenticated
+ @check_xsrf
+ def put(self, student_id):
+ data = self.get_json_body()
+ student = {
+ "last_name": data.get("last_name", None),
+ "first_name": data.get("first_name", None),
+ "email": data.get("email", None),
+ }
+ self.gradebook.update_or_create_student(student_id, **student)
+ self.write(json.dumps(self.api.get_student(student_id)))
+
+
+class StudentSubmissionCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, student_id):
+ submissions = self.api.get_student_submissions(student_id)
+ self.write(json.dumps(submissions))
+
+
+class StudentNotebookSubmissionCollectionHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, student_id, assignment_id):
+ submissions = self.api.get_student_notebook_submissions(student_id, assignment_id)
+ self.write(json.dumps(submissions))
+
+
+class AssignHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def post(self, assignment_id):
+ self.write(json.dumps(self.api.assign(assignment_id)))
+
+
+class UnReleaseHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def post(self, assignment_id):
+ self.write(json.dumps(self.api.unrelease(assignment_id)))
+
+
+class ReleaseHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def post(self, assignment_id):
+ self.write(json.dumps(self.api.release(assignment_id)))
+
+
+class CollectHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def post(self, assignment_id):
+ self.write(json.dumps(self.api.collect(assignment_id)))
+
+
+class AutogradeHandler(BaseApiHandler):
+ @web.authenticated
+ @check_xsrf
+ def post(self, assignment_id, student_id):
+ self.write(json.dumps(self.api.autograde(assignment_id, student_id)))
+
+
default_handlers = [
+ (r"/formgrader/api/assignments", AssignmentCollectionHandler),
+ (r"/formgrader/api/assignment/([^/]+)", AssignmentHandler),
+ (r"/formgrader/api/assignment/([^/]+)/assign", AssignHandler),
+ (r"/formgrader/api/assignment/([^/]+)/unrelease", UnReleaseHandler),
+ (r"/formgrader/api/assignment/([^/]+)/release", ReleaseHandler),
+ (r"/formgrader/api/assignment/([^/]+)/collect", CollectHandler),
+
+ (r"/formgrader/api/notebooks/([^/]+)", NotebookCollectionHandler),
+
+ (r"/formgrader/api/submissions/([^/]+)", SubmissionCollectionHandler),
+ (r"/formgrader/api/submission/([^/]+)/([^/]+)", SubmissionHandler),
+ (r"/formgrader/api/submission/([^/]+)/([^/]+)/autograde", AutogradeHandler),
+
+ (r"/formgrader/api/submitted_notebooks/([^/]+)/([^/]+)", SubmittedNotebookCollectionHandler),
+ (r"/formgrader/api/submitted_notebook/([^/]+)/flag", FlagSubmissionHandler),
+
(r"/formgrader/api/grades", GradeCollectionHandler),
- (r"/formgrader/api/comments", CommentCollectionHandler),
(r"/formgrader/api/grade/([^/]+)", GradeHandler),
+
+ (r"/formgrader/api/comments", CommentCollectionHandler),
(r"/formgrader/api/comment/([^/]+)", CommentHandler),
- (r"/formgrader/api/submission/([^/]+)/flag", FlagSubmissionHandler)
+
+ (r"/formgrader/api/students", StudentCollectionHandler),
+ (r"/formgrader/api/student/([^/]+)", StudentHandler),
+
+ (r"/formgrader/api/student_submissions/([^/]+)", StudentSubmissionCollectionHandler),
+ (r"/formgrader/api/student_notebook_submissions/([^/]+)/([^/]+)", StudentNotebookSubmissionCollectionHandler),
]
diff --git a/nbgrader/server_extensions/formgrader/base.py b/nbgrader/server_extensions/formgrader/base.py
index dbb63212..7a8473e5 100644
--- a/nbgrader/server_extensions/formgrader/base.py
+++ b/nbgrader/server_extensions/formgrader/base.py
@@ -1,9 +1,11 @@
import os
import json
+import functools
from tornado import web
from notebook.base.handlers import IPythonHandler
from ...api import Gradebook
+from ...apps.api import NbGraderAPI
class BaseHandler(IPythonHandler):
@@ -16,11 +18,15 @@ class BaseHandler(IPythonHandler):
def db_url(self):
return self.settings['nbgrader_db_url']
+ @property
+ def coursedir(self):
+ return self.settings['nbgrader_coursedir']
+
@property
def gradebook(self):
- self.log.debug("getting gradebook")
gb = self.settings['nbgrader_gradebook']
if gb is None:
+ self.log.debug("creating gradebook")
gb = Gradebook(self.db_url)
self.settings['nbgrader_gradebook'] = gb
return gb
@@ -29,50 +35,16 @@ class BaseHandler(IPythonHandler):
def mathjax_url(self):
return self.settings['mathjax_url']
- @property
- def notebook_dir(self):
- return self.settings['nbgrader_notebook_dir']
-
- @property
- def notebook_dir_format(self):
- return self.settings['nbgrader_notebook_dir_format']
-
- @property
- def nbgrader_step(self):
- return self.settings['nbgrader_step']
-
@property
def exporter(self):
return self.settings['nbgrader_exporter']
@property
- def notebook_url_prefix(self):
- return self.settings['nbgrader_notebook_url_prefix']
-
- def _filter_existing_notebooks(self, assignment_id, notebooks):
- path = os.path.join(
- self.notebook_dir,
- self.notebook_dir_format,
- "{notebook_id}.ipynb"
- )
-
- submissions = list()
- for nb in notebooks:
- filename = path.format(
- nbgrader_step=self.nbgrader_step,
- assignment_id=assignment_id,
- notebook_id=nb.name,
- student_id=nb.student.id
- )
- if os.path.exists(filename):
- submissions.append(nb)
-
- return sorted(submissions, key=lambda x: x.id)
-
- def _notebook_submission_indexes(self, assignment_id, notebook_id):
- notebooks = self.gradebook.notebook_submissions(notebook_id, assignment_id)
- submissions = self._filter_existing_notebooks(assignment_id, notebooks)
- return dict([(x.id, i) for i, x in enumerate(submissions)])
+ def api(self):
+ level = self.log.level
+ api = NbGraderAPI(self.coursedir, parent=self.coursedir.parent)
+ api.log_level = level
+ return api
def render(self, name, **ns):
template = self.settings['nbgrader_jinja2_env'].get_template(name)
@@ -81,19 +53,19 @@ class BaseHandler(IPythonHandler):
def write_error(self, status_code, **kwargs):
if status_code == 500:
html = self.render(
- 'gradebook_500.tpl',
+ 'base_500.tpl',
base_url=self.base_url,
error_code=500)
elif status_code == 502:
html = self.render(
- 'gradebook_500.tpl',
+ 'base_500.tpl',
base_url=self.base_url,
error_code=502)
elif status_code == 403:
html = self.render(
- 'gradebook_403.tpl',
+ 'base_403.tpl',
base_url=self.base_url,
error_code=403)
@@ -118,3 +90,11 @@ class BaseApiHandler(BaseHandler):
self.log.error("Couldn't parse JSON", exc_info=True)
raise web.HTTPError(400, 'Invalid JSON in body of request')
return model
+
+
+def check_xsrf(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ _ = self.xsrf_token
+ return f(self, *args, **kwargs)
+ return wrapper
diff --git a/nbgrader/server_extensions/formgrader/formgrader.py b/nbgrader/server_extensions/formgrader/formgrader.py
index a6d79243..38006146 100644
--- a/nbgrader/server_extensions/formgrader/formgrader.py
+++ b/nbgrader/server_extensions/formgrader/formgrader.py
@@ -33,14 +33,11 @@ class FormgradeExtension(NbGrader):
# Configure the formgrader settings
tornado_settings = dict(
- nbgrader_notebook_dir=self.coursedir.root,
- nbgrader_notebook_dir_format=self.coursedir.directory_structure,
- nbgrader_step=self.coursedir.autograded_directory,
+ nbgrader_coursedir=self.coursedir,
nbgrader_exporter=HTMLExporter(config=self.config),
nbgrader_gradebook=None,
nbgrader_db_url=self.coursedir.db_url,
nbgrader_jinja2_env=jinja_env,
- nbgrader_notebook_url_prefix=os.path.relpath(self.coursedir.root)
)
webapp.settings.update(tornado_settings)
@@ -66,7 +63,7 @@ class FormgradeExtension(NbGrader):
def load_jupyter_server_extension(nbapp):
"""Load the formgrader extension"""
-
+ nbapp.log.info("Loading the formgrader nbgrader serverextension")
webapp = nbapp.web_app
formgrader = FormgradeExtension(parent=nbapp)
formgrader.initialize([])
diff --git a/nbgrader/server_extensions/formgrader/handlers.py b/nbgrader/server_extensions/formgrader/handlers.py
index b28b798f..40957153 100644
--- a/nbgrader/server_extensions/formgrader/handlers.py
+++ b/nbgrader/server_extensions/formgrader/handlers.py
@@ -3,157 +3,61 @@ import re
from tornado import web
-from .base import BaseHandler
+from .base import BaseHandler, check_xsrf
from ...api import MissingEntry
-class AssignmentsHandler(BaseHandler):
+class ManageAssignmentsHandler(BaseHandler):
@web.authenticated
+ @check_xsrf
def get(self):
- assignments = []
- for assignment in self.gradebook.assignments:
- x = assignment.to_dict()
- x["average_score"] = self.gradebook.average_assignment_score(assignment.name)
- x["average_code_score"] = self.gradebook.average_assignment_code_score(assignment.name)
- x["average_written_score"] = self.gradebook.average_assignment_written_score(assignment.name)
- assignments.append(x)
-
html = self.render(
- "assignments.tpl",
- assignments=assignments,
+ "manage_assignments.tpl",
base_url=self.base_url)
-
self.write(html)
-class AssignmentNotebooksHandler(BaseHandler):
+class ManageSubmissionsHandler(BaseHandler):
@web.authenticated
+ @check_xsrf
def get(self, assignment_id):
- try:
- assignment = self.gradebook.find_assignment(assignment_id)
- except MissingEntry:
- raise web.HTTPError(404, "Invalid assignment: {}".format(assignment_id))
-
- notebooks = []
- for notebook in assignment.notebooks:
- x = notebook.to_dict()
- x["average_score"] = self.gradebook.average_notebook_score(notebook.name, assignment.name)
- x["average_code_score"] = self.gradebook.average_notebook_code_score(notebook.name, assignment.name)
- x["average_written_score"] = self.gradebook.average_notebook_written_score(notebook.name, assignment.name)
- notebooks.append(x)
- assignment = assignment.to_dict()
-
html = self.render(
- "assignment_notebooks.tpl",
- assignment=assignment,
- notebooks=notebooks,
- base_url=self.base_url)
-
- self.write(html)
-
-
-class AssignmentNotebookSubmissionsHandler(BaseHandler):
- @web.authenticated
- def get(self, assignment_id, notebook_id):
- try:
- self.gradebook.find_notebook(notebook_id, assignment_id)
- except MissingEntry:
- raise web.HTTPError(404, "Invalid notebook: {}/{}".format(assignment_id, notebook_id))
-
- submissions = self.gradebook.notebook_submission_dicts(notebook_id, assignment_id)
- indexes = self._notebook_submission_indexes(assignment_id, notebook_id)
- for nb in submissions:
- nb['index'] = indexes.get(nb['id'], None)
-
- submissions = [x for x in submissions if x['index'] is not None]
- submissions.sort(key=lambda x: x["id"])
-
- html = self.render(
- "notebook_submissions.tpl",
- notebook_id=notebook_id,
+ "manage_submissions.tpl",
+ course_dir=self.coursedir.root,
assignment_id=assignment_id,
- submissions=submissions,
- base_url=self.base_url
- )
+ base_url=self.base_url)
self.write(html)
-class StudentsHandler(BaseHandler):
+class GradebookAssignmentsHandler(BaseHandler):
@web.authenticated
+ @check_xsrf
def get(self):
- students = self.gradebook.student_dicts()
- students.sort(key=lambda x: x.get("last_name") or "no last name")
-
html = self.render(
- "students.tpl",
- students=students,
+ "gradebook_assignments.tpl",
base_url=self.base_url)
-
self.write(html)
-class StudentAssignmentsHandler(BaseHandler):
+class GradebookNotebooksHandler(BaseHandler):
@web.authenticated
- def get(self, student_id):
- try:
- student = self.gradebook.find_student(student_id)
- except MissingEntry:
- raise web.HTTPError(404, "Invalid student: {}".format(student_id))
-
- submissions = []
- for assignment in self.gradebook.assignments:
- try:
- submission = self.gradebook.find_submission(assignment.name, student.id).to_dict()
- except MissingEntry:
- submission = {
- "id": None,
- "name": assignment.name,
- "student": student.id,
- "duedate": None,
- "timestamp": None,
- "extension": None,
- "total_seconds_late": 0,
- "score": 0,
- "max_score": assignment.max_score,
- "code_score": 0,
- "max_code_score": assignment.max_code_score,
- "written_score": 0,
- "max_written_score": assignment.max_written_score,
- "needs_manual_grade": False
- }
- submissions.append(submission)
-
- submissions.sort(key=lambda x: x.get("duedate") or "no due date")
- student = student.to_dict()
-
+ @check_xsrf
+ def get(self, assignment_id):
html = self.render(
- "student_assignments.tpl",
- assignments=submissions,
- student=student,
+ "gradebook_notebooks.tpl",
+ assignment_id=assignment_id,
base_url=self.base_url)
-
self.write(html)
-class StudentAssignmentNotebooksHandler(BaseHandler):
+class GradebookNotebookSubmissionsHandler(BaseHandler):
@web.authenticated
- def get(self, student_id, assignment_id):
- try:
- assignment = self.gradebook.find_submission(assignment_id, student_id)
- except MissingEntry:
- raise web.HTTPError(404, "Invalid assignment: {} for {}".format(assignment_id, student_id))
-
- notebooks = assignment.notebooks
- submissions = self._filter_existing_notebooks(assignment_id, notebooks)
- submissions = [x.to_dict() for x in assignment.notebooks]
- for i, nb in enumerate(submissions):
- nb['index'] = i
-
+ @check_xsrf
+ def get(self, assignment_id, notebook_id):
html = self.render(
- "student_submissions.tpl",
+ "gradebook_notebook_submissions.tpl",
assignment_id=assignment_id,
- student=assignment.student.to_dict(),
- submissions=submissions,
+ notebook_id=notebook_id,
base_url=self.base_url
)
self.write(html)
@@ -161,6 +65,7 @@ class StudentAssignmentNotebooksHandler(BaseHandler):
class SubmissionHandler(BaseHandler):
@web.authenticated
+ @check_xsrf
def get(self, submission_id):
try:
submission = self.gradebook.find_submission_notebook_by_id(submission_id)
@@ -177,28 +82,24 @@ class SubmissionHandler(BaseHandler):
url += '?' + self.request.query
return self.redirect(url, permanent=True)
- notebook_dir_format = os.path.join(self.notebook_dir_format, "{notebook_id}.ipynb")
- filename = os.path.join(self.notebook_dir, notebook_dir_format.format(
- nbgrader_step=self.nbgrader_step,
- assignment_id=assignment_id,
- notebook_id=notebook_id,
- student_id=student_id)
- )
- relative_path = os.path.relpath(filename, self.notebook_dir)
- indexes = self._notebook_submission_indexes(assignment_id, notebook_id)
- ix = indexes.get(submission.id, -2)
+ filename = os.path.join(os.path.abspath(self.coursedir.format_path(
+ self.coursedir.autograded_directory, student_id, assignment_id)), '{}.ipynb'.format(notebook_id))
+ relative_path = os.path.relpath(filename, self.coursedir.root)
+ indices = self.api.get_notebook_submission_indices(assignment_id, notebook_id)
+ ix = indices.get(submission.id, -2)
+ url_prefix = os.path.relpath(self.coursedir.root)
resources = {
'assignment_id': assignment_id,
'notebook_id': notebook_id,
'submission_id': submission.id,
'index': ix,
- 'total': len(indexes),
+ 'total': len(indices),
'base_url': self.base_url,
'mathjax_url': self.mathjax_url,
'last_name': submission.student.last_name,
'first_name': submission.student.first_name,
- 'notebook_path': self.notebook_url_prefix + '/' + relative_path
+ 'notebook_path': url_prefix + '/' + relative_path
}
if not os.path.exists(filename):
@@ -216,7 +117,7 @@ class SubmissionHandler(BaseHandler):
class SubmissionNavigationHandler(BaseHandler):
def _assignment_notebook_list_url(self, assignment_id, notebook_id):
- return '{}/formgrader/assignments/{}/{}'.format(self.base_url, assignment_id, notebook_id)
+ return '{}/formgrader/gradebook/{}/{}'.format(self.base_url, assignment_id, notebook_id)
def _submission_url(self, submission_id):
url = '{}/formgrader/submissions/{}'.format(self.base_url, submission_id)
@@ -227,12 +128,12 @@ class SubmissionNavigationHandler(BaseHandler):
def _get_submission_ids(self, assignment_id, notebook_id):
notebooks = self.gradebook.notebook_submissions(notebook_id, assignment_id)
- submissions = self._filter_existing_notebooks(assignment_id, notebooks)
+ submissions = self.api._filter_existing_notebooks(assignment_id, notebooks)
return sorted([x.id for x in submissions])
def _get_incorrect_submission_ids(self, assignment_id, notebook_id, submission):
notebooks = self.gradebook.notebook_submissions(notebook_id, assignment_id)
- submissions = self._filter_existing_notebooks(assignment_id, notebooks)
+ submissions = self.api._filter_existing_notebooks(assignment_id, notebooks)
incorrect_ids = set([x.id for x in submissions if x.failed_tests])
incorrect_ids.add(submission.id)
incorrect_ids = sorted(incorrect_ids)
@@ -275,6 +176,7 @@ class SubmissionNavigationHandler(BaseHandler):
return self._submission_url(submission_ids[ix_incorrect - 1])
@web.authenticated
+ @check_xsrf
def get(self, submission_id, action):
try:
submission = self.gradebook.find_submission_notebook_by_id(submission_id)
@@ -290,7 +192,7 @@ class SubmissionNavigationHandler(BaseHandler):
class SubmissionFilesHandler(web.StaticFileHandler, BaseHandler):
def initialize(self, default_filename=None):
super(SubmissionFilesHandler, self).initialize(
- self.notebook_dir, default_filename=default_filename)
+ self.coursedir.root, default_filename=default_filename)
def parse_url_path(self, url_path):
submission_id, path = re.match(r"([^/]+)/(.*)", url_path.lstrip("/")).groups()
@@ -302,19 +204,52 @@ class SubmissionFilesHandler(web.StaticFileHandler, BaseHandler):
except MissingEntry:
raise web.HTTPError(404, "Invalid submission: {}".format(submission_id))
- dirname = os.path.join(self.notebook_dir, self.notebook_dir_format.format(
- nbgrader_step=self.nbgrader_step,
- assignment_id=assignment_id,
- student_id=student_id))
-
+ dirname = os.path.abspath(self.coursedir.format_path(
+ self.coursedir.autograded_directory, student_id, assignment_id))
full_path = os.path.join(dirname, path)
return super(SubmissionFilesHandler, self).parse_url_path(full_path)
@web.authenticated
+ @check_xsrf
def get(self, *args, **kwargs):
return super(SubmissionFilesHandler, self).get(*args, **kwargs)
+class ManageStudentsHandler(BaseHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self):
+ html = self.render(
+ "manage_students.tpl",
+ base_url=self.base_url)
+ self.write(html)
+
+
+class ManageStudentsAssignmentsHandler(BaseHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, student_id):
+ html = self.render(
+ "manage_students_assignments.tpl",
+ student_id=student_id,
+ base_url=self.base_url)
+
+ self.write(html)
+
+
+class ManageStudentNotebookSubmissionsHandler(BaseHandler):
+ @web.authenticated
+ @check_xsrf
+ def get(self, student_id, assignment_id):
+ html = self.render(
+ "manage_students_notebook_submissions.tpl",
+ assignment_id=assignment_id,
+ student_id=student_id,
+ base_url=self.base_url
+ )
+ self.write(html)
+
+
class Template404(BaseHandler):
"""Render our 404 template"""
def prepare(self):
@@ -330,19 +265,22 @@ fonts_path = os.path.join(components_path, 'bootstrap', 'fonts')
_navigation_regex = r"(?P<action>next_incorrect|prev_incorrect|next|prev)"
default_handlers = [
- (r"/formgrader/?", AssignmentsHandler),
- (r"/formgrader/assignments/?", AssignmentsHandler),
- (r"/formgrader/assignments/([^/]+)/?", AssignmentNotebooksHandler),
- (r"/formgrader/assignments/([^/]+)/([^/]+)/?", AssignmentNotebookSubmissionsHandler),
+ (r"/formgrader/?", ManageAssignmentsHandler),
+ (r"/formgrader/manage_assignments/?", ManageAssignmentsHandler),
+ (r"/formgrader/manage_submissions/([^/]+)/?", ManageSubmissionsHandler),
+
+ (r"/formgrader/gradebook/?", GradebookAssignmentsHandler),
+ (r"/formgrader/gradebook/([^/]+)/?", GradebookNotebooksHandler),
+ (r"/formgrader/gradebook/([^/]+)/([^/]+)/?", GradebookNotebookSubmissionsHandler),
- (r"/formgrader/students/?", StudentsHandler),
- (r"/formgrader/students/([^/]+)/?", StudentAssignmentsHandler),
- (r"/formgrader/students/([^/]+)/([^/]+)/?", StudentAssignmentNotebooksHandler),
+ (r"/formgrader/manage_students/?", ManageStudentsHandler),
+ (r"/formgrader/manage_students/([^/]+)/?", ManageStudentsAssignmentsHandler),
+ (r"/formgrader/manage_students/([^/]+)/([^/]+)/?", ManageStudentNotebookSubmissionsHandler),
(r"/formgrader/submissions/components/(.*)", web.StaticFileHandler, {'path': components_path}),
(r"/formgrader/submissions/([^/]+)/?", SubmissionHandler),
(r"/formgrader/submissions/(?P<submission_id>[^/]+)/%s/?" % _navigation_regex, SubmissionNavigationHandler),
(r"/formgrader/submissions/(.*)", SubmissionFilesHandler),
- (r"/formgrader/fonts/(.*)", web.StaticFileHandler, {'path': fonts_path})
+ (r"/formgrader/fonts/(.*)", web.StaticFileHandler, {'path': fonts_path}),
]
diff --git a/nbgrader/server_extensions/formgrader/static/css/nbgrader.css b/nbgrader/server_extensions/formgrader/static/css/nbgrader.css
new file mode 100644
index 00000000..61cc0d77
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/css/nbgrader.css
@@ -0,0 +1,46 @@
+table.inner-table {
+ width: 50%;
+ max-width: 50%;
+ margin-left: 1em;
+}
+
+table.inner-table td {
+ padding: 0.25em;
+}
+
+td.inner-table {
+ border-top: none !important;
+ padding-top: 0 !important;
+}
+
+td.inner-table span.glyphicon {
+ font-size: 0.75em;
+}
+
+td.assignment-id span.glyphicon {
+ font-size: 0.5em;
+}
+
+.name-shown {
+ display: none;
+}
+
+.glyphicon.name-shown, .glyphicon.name-hidden {
+ cursor: pointer;
+ cursor: hand;
+ color: #000;
+}
+
+table.form-table > tbody > tr > td {
+ vertical-align: middle;
+}
+
+div.modal div.panel pre {
+ border: none;
+ font-size: 8pt;
+ max-height: 30em;
+}
+
+div.modal p {
+ margin-bottom: 20px;
+}
diff --git a/nbgrader/server_extensions/formgrader/static/js/backbone_xsrf.js b/nbgrader/server_extensions/formgrader/static/js/backbone_xsrf.js
new file mode 100644
index 00000000..c055ab22
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/backbone_xsrf.js
@@ -0,0 +1,20 @@
+// Make sure put and post requests are sent with the xsrf cookie
+function getCookie(name) {
+ var r = document.cookie.match("\\b" + name + "=([^;]*)\\b");
+ return r ? r[1] : undefined;
+}
+
+// Backbone requests
+var oldSync = Backbone.sync;
+Backbone.sync = function(method, model, options) {
+ options.beforeSend = function(xhr) {
+ xhr.setRequestHeader('X-CSRFToken', getCookie("_xsrf"));
+ };
+ return oldSync(method, model, options);
+};
+
+$.ajaxPrefilter(function(options, originalOptions, jqXHR){
+ if (options['type'].toLowerCase() === "post") {
+ jqXHR.setRequestHeader('X-CSRFToken', getCookie("_xsrf"));
+ }
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/formgrade.js b/nbgrader/server_extensions/formgrader/static/js/formgrade.js
index 7b59a5ec..1211f6cc 100644
--- a/nbgrader/server_extensions/formgrader/static/js/formgrade.js
+++ b/nbgrader/server_extensions/formgrader/static/js/formgrade.js
@@ -298,7 +298,7 @@ FormGrader.prototype.configureScrolling = function () {
FormGrader.prototype.flag = function () {
$.ajax({
'method': 'POST',
- 'url': base_url + '/api/submission/' + submission_id + '/flag',
+ 'url': base_url + '/api/submitted_notebook/' + submission_id + '/flag',
'headers': {'X-CSRFToken': getCookie("_xsrf")},
'success': function (data, status, xhr) {
var elem = $("#statusmessage");
@@ -323,21 +323,6 @@ FormGrader.prototype.flag = function () {
});
};
-// Make sure put and post requests are sent with the xsrf cookie
-function getCookie(name) {
- var r = document.cookie.match("\\b" + name + "=([^;]*)\\b");
- return r ? r[1] : undefined;
-}
-
-// Backbone requests
-var oldSync = Backbone.sync;
-Backbone.sync = function(method, model, options) {
- options.beforeSend = function(xhr) {
- xhr.setRequestHeader('X-CSRFToken', getCookie("_xsrf"));
- };
- return oldSync(method, model, options);
-};
-
var formgrader = new FormGrader(base_url, submission_id);
$(window).load(function () {
formgrader.init()
diff --git a/nbgrader/server_extensions/formgrader/static/js/keyboardmanager.js b/nbgrader/server_extensions/formgrader/static/js/formgrade_keyboardmanager.js
similarity index 100%
rename from nbgrader/server_extensions/formgrader/static/js/keyboardmanager.js
rename to nbgrader/server_extensions/formgrader/static/js/formgrade_keyboardmanager.js
diff --git a/nbgrader/server_extensions/formgrader/static/js/models.js b/nbgrader/server_extensions/formgrader/static/js/formgrade_models.js
similarity index 100%
rename from nbgrader/server_extensions/formgrader/static/js/models.js
rename to nbgrader/server_extensions/formgrader/static/js/formgrade_models.js
diff --git a/nbgrader/server_extensions/formgrader/static/js/gradebook_assignments.js b/nbgrader/server_extensions/formgrader/static/js/gradebook_assignments.js
new file mode 100644
index 00000000..b599fac3
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/gradebook_assignments.js
@@ -0,0 +1,101 @@
+var Assignment = Backbone.Model.extend({});
+var Assignments = Backbone.Collection.extend({
+ model: Assignment,
+ url: base_url + "/formgrader/api/assignments"
+});
+
+var AssignmentUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$name = this.$el.find(".name");
+ this.$duedate = this.$el.find(".duedate");
+ this.$num_submissions = this.$el.find(".num-submissions");
+ this.$score = this.$el.find(".score");
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$duedate.empty();
+ this.$num_submissions.empty();
+ this.$score.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // assignment name
+ var name = this.model.get("name");
+ this.$name.attr("data-order", name);
+ this.$name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/gradebook/" + name)
+ .text(name));
+
+ // duedate
+ var duedate = this.model.get("duedate");
+ var display_duedate = this.model.get("display_duedate");
+ if (duedate === null) {
+ duedate = "None";
+ display_duedate = "None";
+ }
+ this.$duedate.attr("data-order", duedate);
+ this.$duedate.text(display_duedate);
+
+ // number of submissions
+ var num_submissions = this.model.get("num_submissions");
+ this.$num_submissions.attr("data-order", num_submissions)
+ this.$num_submissions.text(num_submissions);
+
+ // score
+ var score = roundToPrecision(this.model.get("average_score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0)
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center duedate"));
+ row.append($("<td/>").addClass("text-center num-submissions"));
+ row.append($("<td/>").addClass("text-center score"));
+ table.append(row)
+ return row;
+};
+
+var loadAssignments = function () {
+ var tbl = $("#main-table");
+
+ models = new Assignments();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new AssignmentUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadAssignments();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/gradebook_notebook_submissions.js b/nbgrader/server_extensions/formgrader/static/js/gradebook_notebook_submissions.js
new file mode 100644
index 00000000..ce4e3adb
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/gradebook_notebook_submissions.js
@@ -0,0 +1,176 @@
+var SubmittedNotebook = Backbone.Model.extend({});
+var SubmittedNotebooks = Backbone.Collection.extend({
+ model: SubmittedNotebook,
+ url: base_url + "/formgrader/api/submitted_notebooks/" + assignment_id + "/" + notebook_id
+});
+
+var SubmittedNotebookUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$reveal = this.$el.find(".reveal");
+ this.$name = this.$el.find(".name");
+ this.$score = this.$el.find(".score");
+ this.$code_score = this.$el.find(".code-score");
+ this.$written_score = this.$el.find(".written-score");
+ this.$needs_manual_grade = this.$el.find(".needs-manual-grade");
+ this.$tests_failed = this.$el.find(".tests-failed");
+ this.$flagged = this.$el.find(".flagged");
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$reveal.empty();
+ this.$name.empty();
+ this.$score.empty();
+ this.$code_score.empty();
+ this.$written_score.empty();
+ this.$needs_manual_grade.empty();
+ this.$tests_failed.empty();
+ this.$flagged.empty();
+ },
+
+ showName: function () {
+ this.$reveal.parent().find(".name-shown").show();
+ this.$reveal.parent().find(".name-hidden").hide();
+ },
+
+ hideName: function () {
+ this.$reveal.parent().find(".name-hidden").show();
+ this.$reveal.parent().find(".name-shown").hide();
+ },
+
+ render: function () {
+ this.clear();
+
+ // show/hide real name
+ this.$reveal.append($("<span/>")
+ .addClass("glyphicon glyphicon-eye-open name-hidden")
+ .attr("aria-hidden", "true")
+ .click(_.bind(this.showName, this)));
+ this.$reveal.append($("<span/>")
+ .addClass("glyphicon glyphicon-eye-close name-shown")
+ .attr("aria-hidden", "true")
+ .click(_.bind(this.hideName, this)));
+
+ // notebook name
+ this.$name.attr("data-order", this.model.get("index"));
+ this.$name.append($("<a/>")
+ .addClass("name-hidden")
+ .attr("href", base_url + "/formgrader/submissions/" + this.model.get("id"))
+ .text("Submission #" + (this.model.get("index") + 1)));
+ this.$name.append($("<a/>")
+ .addClass("name-shown")
+ .attr("href", base_url + "/formgrader/submissions/" + this.model.get("id"))
+ .text(this.model.get("last_name") + ", " + this.model.get("first_name")));
+
+ // score
+ var score = roundToPrecision(this.model.get("score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0);
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+
+ // code score
+ score = roundToPrecision(this.model.get("code_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_code_score"), 2);
+ if (max_score === 0) {
+ this.$code_score.attr("data-order", 0.0);
+ } else {
+ this.$code_score.attr("data-order", score / max_score);
+ }
+ this.$code_score.text(score + " / " + max_score);
+
+ // written score
+ score = roundToPrecision(this.model.get("written_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_written_score"), 2);
+ if (max_score === 0) {
+ this.$written_score.attr("data-order", 0.0);
+ } else {
+ this.$written_score.attr("data-order", score / max_score);
+ }
+ this.$written_score.text(score + " / " + max_score);
+
+ // needs manual grade?
+ if (this.model.get("needs_manual_grade")) {
+ this.$needs_manual_grade.attr("data-search", "needs manual grade");
+ this.$needs_manual_grade.attr("data-order", 1);
+ this.$needs_manual_grade.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$needs_manual_grade.attr("data-search", "");
+ this.$needs_manual_grade.attr("data-order", 0);
+ }
+
+ // tests failed?
+ if (this.model.get("failed_tests")) {
+ this.$tests_failed.attr("data-search", "tests failed");
+ this.$tests_failed.attr("data-order", 1);
+ this.$tests_failed.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$tests_failed.attr("data-search", "");
+ this.$tests_failed.attr("data-order", 0);
+ }
+
+ // flagged?
+ if (this.model.get("flagged")) {
+ this.$flagged.attr("data-search", "flagged");
+ this.$flagged.attr("data-order", 1);
+ this.$flagged.append($("<span/>")
+ .addClass("glyphicon glyphicon-flag"));
+ } else {
+ this.$flagged.attr("data-search", "");
+ this.$flagged.attr("data-order", 0);
+ }
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("reveal"));
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center score"));
+ row.append($("<td/>").addClass("text-center code-score"));
+ row.append($("<td/>").addClass("text-center written-score"));
+ row.append($("<td/>").addClass("text-center needs-manual-grade"));
+ row.append($("<td/>").addClass("text-center tests-failed"));
+ row.append($("<td/>").addClass("text-center flagged"));
+ table.append(row)
+ return row;
+};
+
+var loadSubmittedNotebooks = function () {
+ var tbl = $("#main-table");
+
+ models = new SubmittedNotebooks();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new SubmittedNotebookUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ $('span.glyphicon.name-hidden').tooltip({title: "Show student name"});
+ $('span.glyphicon.name-shown').tooltip({title: "Hide student name"});
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadSubmittedNotebooks();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/gradebook_notebooks.js b/nbgrader/server_extensions/formgrader/static/js/gradebook_notebooks.js
new file mode 100644
index 00000000..5fea47e6
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/gradebook_notebooks.js
@@ -0,0 +1,119 @@
+var Notebook = Backbone.Model.extend({});
+var Notebooks = Backbone.Collection.extend({
+ model: Notebook,
+ url: base_url + "/formgrader/api/notebooks/" + assignment_id
+});
+
+var NotebookUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$name = this.$el.find(".name");
+ this.$avg_score = this.$el.find(".avg-score");
+ this.$avg_code_score = this.$el.find(".avg-code-score");
+ this.$avg_written_score = this.$el.find(".avg-written-score");
+ this.$needs_manual_grade = this.$el.find(".needs-manual-grade");
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$avg_score.empty();
+ this.$avg_code_score.empty();
+ this.$avg_written_score.empty();
+ this.$needs_manual_grade.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // notebook name
+ var name = this.model.get("name");
+ this.$name.attr("data-order", name);
+ this.$name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/gradebook/" + assignment_id + "/" + name)
+ .text(name));
+
+ // average score
+ var score = roundToPrecision(this.model.get("average_score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$avg_score.attr("data-order", 0.0);
+ } else {
+ this.$avg_score.attr("data-order", score / max_score);
+ }
+ this.$avg_score.text(score + " / " + max_score);
+
+ // average code score
+ score = roundToPrecision(this.model.get("average_code_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_code_score"), 2);
+ if (max_score === 0) {
+ this.$avg_code_score.attr("data-order", 0.0);
+ } else {
+ this.$avg_code_score.attr("data-order", score / max_score);
+ }
+ this.$avg_code_score.text(score + " / " + max_score);
+
+ // average written score
+ score = roundToPrecision(this.model.get("average_written_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_written_score"), 2);
+ if (max_score === 0) {
+ this.$avg_written_score.attr("data-order", 0.0);
+ } else {
+ this.$avg_written_score.attr("data-order", score / max_score);
+ }
+ this.$avg_written_score.text(score + " / " + max_score);
+
+ // needs manual grade
+ if (this.model.get("needs_manual_grade")) {
+ this.$needs_manual_grade.attr("data-search", "needs manual grade");
+ this.$needs_manual_grade.attr("data-order", 1);
+ this.$needs_manual_grade.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$needs_manual_grade.attr("data-search", "");
+ this.$needs_manual_grade.attr("data-order", 0);
+ }
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center avg-score"));
+ row.append($("<td/>").addClass("text-center avg-code-score"));
+ row.append($("<td/>").addClass("text-center avg-written-score"));
+ row.append($("<td/>").addClass("text-center needs-manual-grade"));
+ table.append(row)
+ return row;
+};
+
+var loadNotebooks = function () {
+ var tbl = $("#main-table");
+
+ models = new Notebooks();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new NotebookUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadNotebooks();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/manage_assignments.js b/nbgrader/server_extensions/formgrader/static/js/manage_assignments.js
new file mode 100644
index 00000000..820386ab
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/manage_assignments.js
@@ -0,0 +1,477 @@
+var Assignment = Backbone.Model.extend({
+ idAttribute: 'name',
+ urlRoot: base_url + "/formgrader/api/assignment"
+});
+
+var Assignments = Backbone.Collection.extend({
+ model: Assignment,
+ url: base_url + "/formgrader/api/assignments"
+});
+
+var AssignmentUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$modal = undefined;
+ this.$modal_duedate = undefined;
+ this.$modal_timezone = undefined;
+ this.$modal_save = undefined;
+
+ this.$name = this.$el.find(".name");
+ this.$duedate = this.$el.find(".duedate");
+ this.$status = this.$el.find(".status");
+ this.$edit = this.$el.find(".edit");
+ this.$assign = this.$el.find(".assign");
+ this.$preview = this.$el.find(".preview");
+ this.$release = this.$el.find(".release");
+ this.$collect = this.$el.find(".collect");
+ this.$num_submissions = this.$el.find(".num-submissions");
+
+ this.listenTo(this.model, "change", this.render);
+ this.listenTo(this.model, "request", this.animateSaving);
+ this.listenTo(this.model, "sync", this.closeModal);
+
+ this.render();
+ },
+
+ openModal: function () {
+ var body = $("<table/>").addClass("table table-striped form-table");
+ var name = $("<tr/>");
+ body.append(name);
+ name.append($("<td/>").addClass("align-middle").text("Name"));
+ name.append($("<td/>").append($("<input/>")
+ .addClass("modal-name")
+ .attr("type", "text")
+ .attr("disabled", "disabled")));
+
+ var duedate = $("<tr/>");
+ body.append(duedate);
+ duedate.append($("<td/>").addClass("align-middle").text("Due date (optional)"));
+ duedate.append($("<td/>").append($("<input/>").addClass("modal-duedate").attr("type", "datetime-local")));
+
+ var timezone = $("<tr/>");
+ body.append(timezone);
+ timezone.append($("<td/>").addClass("align-middle").text("Timezone (optional)"));
+ timezone.append($("<td/>").append($("<input/>").addClass("modal-timezone").attr("type", "text")));
+
+ var footer = $("<div/>");
+ footer.append($("<button/>")
+ .addClass("btn btn-primary save")
+ .attr("type", "button")
+ .text("Save"));
+ footer.append($("<button/>")
+ .addClass("btn btn-danger")
+ .attr("type", "button")
+ .attr("data-dismiss", "modal")
+ .text("Cancel"));
+
+ this.$modal = createModal("edit-assignment-modal", "Editing " + this.model.get("name"), body, footer);
+ this.$modal.find("input.modal-name").val(this.model.get("name"));
+ this.$modal_duedate = this.$modal.find("input.modal-duedate");
+ this.$modal_duedate.val(this.model.get("duedate_notimezone"));
+ this.$modal_timezone = this.$modal.find("input.modal-timezone");
+ this.$modal_timezone.val(this.model.get("duedate_timezone"));
+ this.$modal_save = this.$modal.find("button.save");
+ this.$modal_save.click(_.bind(this.save, this));
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$duedate.empty();
+ this.$status.empty();
+ this.$edit.empty();
+ this.$assign.empty();
+ this.$preview.empty();
+ this.$release.empty();
+ this.$collect.empty();
+ this.$num_submissions.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // assignment name
+ var name = this.model.get("name")
+ this.$name.attr("data-order", name);
+ this.$name.append($("<a/>")
+ .attr("target", "_blank")
+ .attr("href", base_url + "/tree/" + this.model.get("source_path"))
+ .text(name));
+
+ // duedate
+ var duedate = this.model.get("duedate");
+ var display_duedate = this.model.get("display_duedate");
+ if (duedate === null) {
+ duedate = "None";
+ display_duedate = "None";
+ }
+ this.$duedate.attr("data-order", duedate);
+ this.$duedate.text(display_duedate);
+
+ // status
+ var status = this.model.get("status");
+ if (status === "draft") {
+ this.$status.attr("data-order", "draft");
+ this.$status.append($("<span/>").addClass("label label-info").text("draft"));
+ } else if (status === "released") {
+ this.$status.attr("data-order", "released");
+ this.$status.append($("<span/>").addClass("label label-success").text("released"));
+ }
+
+ // edit metadata
+ this.$edit.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.openModal, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-pencil")
+ .attr("aria-hidden", "true")));
+
+ // generate student version
+ this.$assign.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.assign, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-education")
+ .attr("aria-hidden", "true")));
+
+ // preview student version
+ var release_path = this.model.get("release_path");
+ if (release_path) {
+ this.$preview.append($("<a/>")
+ .attr("target", "_blank")
+ .attr("href", base_url + "/tree/" + release_path)
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-search")
+ .attr("aria-hidden", "true")));
+ }
+
+ // release
+ var releaseable = this.model.get("releaseable");
+ if (release_path && releaseable) {
+ if (status === "draft") {
+ this.$release.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.release, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-cloud-upload")
+ .attr("aria-hidden", "true")));
+ } else {
+ this.$release.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.unrelease, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-remove")
+ .attr("aria-hidden", "true")));
+ }
+ }
+
+ // collect
+ if (release_path && releaseable) {
+ if (status === "released") {
+ this.$collect.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.collect, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-cloud-download")
+ .attr("aria-hidden", "true")));
+ }
+ }
+
+ // number of submissions
+ var num_submissions = this.model.get("num_submissions");
+ this.$num_submissions.attr("data-order", num_submissions);
+ if (num_submissions === 0) {
+ this.$num_submissions.text(0);
+ } else {
+ this.$num_submissions.append($("<a/>")
+ .attr("href", base_url + "/formgrader/manage_submissions/" + this.model.get("name"))
+ .text(num_submissions));
+ }
+ },
+
+ assign: function () {
+ this.clear();
+ this.$name.text("Please wait...");
+ $.post(base_url + "/formgrader/api/assignment/" + this.model.get("name") + "/assign")
+ .done(_.bind(this.assign_success, this))
+ .fail(_.bind(this.assign_failure, this));
+ },
+
+ assign_success: function (response) {
+ this.model.fetch();
+ response = JSON.parse(response);
+ if (response["success"]) {
+ createLogModal(
+ "success-modal",
+ "Success",
+ "Successfully created the student version of '" + this.model.get("name") + "':",
+ response["log"]);
+
+ } else {
+ createLogModal(
+ "error-modal",
+ "Error",
+ "There was an error creating the student version of '" + this.model.get("name") + "':",
+ response["log"],
+ response["error"]);
+ }
+ },
+
+ assign_failure: function (response) {
+ this.model.fetch();
+ createModal(
+ "error-modal",
+ "Error",
+ "There was an error creating the student version of '" + this.model.get("name") + "'.");
+ },
+
+ unrelease: function () {
+ this.clear();
+ this.$name.text("Please wait...");
+ $.post(base_url + "/formgrader/api/assignment/" + this.model.get("name") + "/unrelease")
+ .done(_.bind(this.unrelease_success, this))
+            .fail(_.bind(this.unrelease_failure, this));
+ },
+
+ unrelease_success: function (response) {
+ this.model.fetch();
+ response = JSON.parse(response);
+ if (response["success"]) {
+ createLogModal(
+ "success-modal",
+ "Success",
+ "Successfully unreleased '" + this.model.get("name") + "'.",
+ response["log"]);
+
+ } else {
+ createLogModal(
+ "error-modal",
+ "Error",
+ "There was an error unreleasing '" + this.model.get("name") + "':",
+ response["log"],
+ response["error"]);
+ }
+ },
+
+ unrelease_failure: function () {
+ this.model.fetch();
+ createModal(
+ "error-modal",
+ "Error",
+ "There was an error unreleasing '" + this.model.get("name") + "'.");
+ },
+
+ release: function () {
+ this.clear();
+ this.$name.text("Please wait...");
+ $.post(base_url + "/formgrader/api/assignment/" + this.model.get("name") + "/release")
+ .done(_.bind(this.release_success, this))
+ .fail(_.bind(this.release_failure, this));
+ },
+
+ release_success: function (response) {
+ this.model.fetch();
+ response = JSON.parse(response);
+ if (response["success"]) {
+ createLogModal(
+ "success-modal",
+ "Success",
+ "Successfully released '" + this.model.get("name") + "'.",
+ response["log"]);
+
+ } else {
+ createLogModal(
+ "error-modal",
+ "Error",
+ "There was an error releasing '" + this.model.get("name") + "':",
+ response["log"],
+ response["error"]);
+ }
+ },
+
+ release_failure: function () {
+ this.model.fetch();
+ createModal(
+ "error-modal",
+ "Error",
+ "There was an error releasing '" + this.model.get("name") + "'.");
+ },
+
+ collect: function () {
+ this.clear();
+ this.$name.text("Please wait...");
+ $.post(base_url + "/formgrader/api/assignment/" + this.model.get("name") + "/collect")
+ .done(_.bind(this.collect_success, this))
+ .fail(_.bind(this.collect_failure, this));
+ },
+
+ collect_success: function (response) {
+ this.model.fetch();
+ response = JSON.parse(response);
+ if (response["success"]) {
+ createLogModal(
+ "success-modal",
+ "Success",
+ "Successfully collected submissions of '" + this.model.get("name") + "'.",
+ response["log"]);
+
+ } else {
+ createLogModal(
+ "error-modal",
+ "Error",
+ "There was an error collecting '" + this.model.get("name") + "':",
+ response["log"],
+ response["error"]);
+ }
+ },
+
+ collect_failure: function () {
+ this.model.fetch();
+ createModal(
+ "error-modal",
+ "Error",
+ "There was an error collecting submissions of '" + this.model.get("name") + "'.");
+ },
+
+ save: function () {
+ var duedate = this.$modal_duedate.val();
+ var timezone = this.$modal_timezone.val();
+ if (duedate === "") {
+ duedate = null;
+ timezone = null;
+ }
+ this.model.save({"duedate_notimezone": duedate, "duedate_timezone": timezone});
+ },
+
+ animateSaving: function () {
+ if (this.$modal_save) {
+ this.$modal_save.text("Saving...");
+ }
+ },
+
+ closeModal: function () {
+ if (this.$modal) {
+            this.$modal.modal('hide');
+ this.$modal = undefined;
+ this.$modal_duedate = undefined;
+ this.$modal_timezone = undefined;
+ this.$modal_save = undefined;
+ }
+
+ this.render();
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center duedate"));
+ row.append($("<td/>").addClass("text-center status"));
+ row.append($("<td/>").addClass("text-center edit"));
+ row.append($("<td/>").addClass("text-center assign"));
+ row.append($("<td/>").addClass("text-center preview"));
+ row.append($("<td/>").addClass("text-center release"));
+ row.append($("<td/>").addClass("text-center collect"));
+ row.append($("<td/>").addClass("text-center num-submissions"));
+    table.append(row);
+ return row;
+};
+
+var createAssignmentModal = function () {
+ var modal;
+ var createAssignment = function () {
+ var name = modal.find(".name").val();
+ var duedate = modal.find(".duedate").val();
+ var timezone = modal.find(".timezone").val();
+ if (duedate === "") {
+ duedate = null;
+ timezone = null;
+ }
+ if (timezone == "") {
+ timezone = null;
+ }
+ if (name === "") {
+ modal.modal('hide');
+ return;
+ }
+
+ var model = new Assignment({
+ "name": name,
+ "duedate_notimezone": duedate,
+ "duedate_timezone": timezone,
+ }, {
+ "collection": models
+ });
+
+ var tbl = $("#main-table");
+ var row = insertRow(tbl);
+ var view = new AssignmentUI({
+ "model": model,
+ "el": row
+ });
+ views.push(view);
+ model.save();
+ tbl.parent().DataTable().row.add(row).draw();
+
+ modal.modal('hide');
+ };
+
+ var body = $("<table/>").addClass("table table-striped form-table");
+ var name = $("<tr/>");
+ body.append(name);
+ name.append($("<td/>").addClass("align-middle").text("Name"));
+ name.append($("<td/>").append($("<input/>").addClass("name").attr("type", "text").attr("size", "31")));
+
+ var duedate = $("<tr/>");
+ body.append(duedate);
+ duedate.append($("<td/>").addClass("align-middle").text("Due date (optional)"));
+ duedate.append($("<td/>").append($("<input/>").addClass("duedate").attr("type", "datetime-local")));
+
+ var timezone = $("<tr/>");
+ body.append(timezone);
+ timezone.append($("<td/>").addClass("align-middle").text("Timezone (optional)"));
+ timezone.append($("<td/>").append($("<input/>").addClass("timezone").attr("type", "text")));
+
+ var footer = $("<div/>");
+ footer.append($("<button/>")
+ .addClass("btn btn-primary save")
+ .attr("type", "button")
+ .click(createAssignment)
+ .text("Save"));
+ footer.append($("<button/>")
+ .addClass("btn btn-danger")
+ .attr("type", "button")
+ .attr("data-dismiss", "modal")
+ .text("Cancel"));
+
+ modal = createModal("add-assignment-modal", "Add New Assignment", body, footer);
+};
+
+var loadAssignments = function () {
+ var tbl = $("#main-table");
+
+ models = new Assignments();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new AssignmentUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadAssignments();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/manage_students.js b/nbgrader/server_extensions/formgrader/static/js/manage_students.js
new file mode 100644
index 00000000..bb929deb
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/manage_students.js
@@ -0,0 +1,284 @@
+var Student = Backbone.Model.extend({
+ urlRoot: base_url + "/formgrader/api/student"
+});
+
+var Students = Backbone.Collection.extend({
+ model: Student,
+ url: base_url + "/formgrader/api/students"
+});
+
+var StudentUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$modal = undefined;
+ this.$modal_first_name = undefined;
+ this.$modal_last_name = undefined;
+ this.$modal_email = undefined;
+ this.$modal_save = undefined;
+
+ this.$name = this.$el.find(".name");
+ this.$id = this.$el.find(".id");
+ this.$email = this.$el.find(".email");
+ this.$score = this.$el.find(".score");
+ this.$edit = this.$el.find(".edit");
+
+ this.listenTo(this.model, "change", this.render);
+ this.listenTo(this.model, "request", this.animateSaving);
+ this.listenTo(this.model, "sync", this.closeModal);
+
+ this.render();
+ },
+
+ openModal: function () {
+ var body = $("<table/>").addClass("table table-striped form-table");
+ var id = $("<tr/>");
+ body.append(id);
+ id.append($("<td/>").addClass("align-middle").text("Student ID"));
+ id.append($("<td/>").append($("<input/>")
+ .addClass("modal-id")
+ .attr("type", "text")
+ .attr("disabled", "disabled")));
+
+ var first_name = $("<tr/>");
+ body.append(first_name);
+ first_name.append($("<td/>").addClass("align-middle").text("First name (optional)"));
+ first_name.append($("<td/>").append($("<input/>").addClass("modal-first-name").attr("type", "text")));
+
+ var last_name = $("<tr/>");
+ body.append(last_name);
+ last_name.append($("<td/>").addClass("align-middle").text("Last name (optional)"));
+ last_name.append($("<td/>").append($("<input/>").addClass("modal-last-name").attr("type", "text")));
+
+ var email = $("<tr/>");
+ body.append(email);
+ email.append($("<td/>").addClass("align-middle").text("Email (optional)"));
+ email.append($("<td/>").append($("<input/>").addClass("modal-email").attr("type", "text")));
+
+ var footer = $("<div/>");
+ footer.append($("<button/>")
+ .addClass("btn btn-primary save")
+ .attr("type", "button")
+ .text("Save"));
+ footer.append($("<button/>")
+ .addClass("btn btn-danger")
+ .attr("type", "button")
+ .attr("data-dismiss", "modal")
+ .text("Cancel"));
+
+ this.$modal = createModal("edit-student-modal", "Editing " + this.model.get("id"), body, footer);
+ this.$modal.find("input.modal-id").val(this.model.get("id"));
+ this.$modal_first_name = this.$modal.find("input.modal-first-name");
+ this.$modal_first_name.val(this.model.get("first_name"));
+ this.$modal_last_name = this.$modal.find("input.modal-last-name");
+ this.$modal_last_name.val(this.model.get("last_name"));
+ this.$modal_email = this.$modal.find("input.modal-email");
+ this.$modal_email.val(this.model.get("email"));
+ this.$modal_save = this.$modal.find("button.save");
+ this.$modal_save.click(_.bind(this.save, this));
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$id.empty();
+ this.$email.empty();
+ this.$score.empty();
+ this.$edit.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // student name
+ var last_name = this.model.get("last_name");
+ if (last_name === null) last_name = "None";
+ var first_name = this.model.get("first_name");
+ if (first_name === null) first_name = "None";
+ var name = last_name + ", " + first_name;
+ this.$name.attr("data-order", name);
+ this.$name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/manage_students/" + this.model.get("id"))
+ .text(name));
+
+ // id
+ var id = this.model.get("id");
+ this.$id.attr("data-order", id);
+ this.$id.text(id);
+
+ // email
+ var email = this.model.get("email");
+ if (email === null) email = "None";
+ this.$email.attr("data-order", email);
+ this.$email.text(email);
+
+ // score
+ var score = roundToPrecision(this.model.get("score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0);
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+
+ // edit metadata
+ this.$edit.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.openModal, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-pencil")
+ .attr("aria-hidden", "true")));
+ },
+
+ save: function () {
+ var first_name = this.$modal_first_name.val();
+ if (first_name === "") first_name = null;
+
+ var last_name = this.$modal_last_name.val();
+ if (last_name === "") last_name = null;
+
+ var email = this.$modal_email.val();
+ if (email === "") email = null;
+
+ this.model.save({
+ "first_name": first_name,
+ "last_name": last_name,
+ "email": email
+ });
+ },
+
+ animateSaving: function () {
+ if (this.$modal_save) {
+ this.$modal_save.text("Saving...");
+ }
+ },
+
+ closeModal: function () {
+ if (this.$modal) {
+            this.$modal.modal('hide');
+ this.$modal = undefined;
+ this.$modal_first_name = undefined;
+ this.$modal_last_name = undefined;
+ this.$modal_email = undefined;
+ this.$modal_save = undefined;
+ }
+
+ this.render();
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center id"));
+ row.append($("<td/>").addClass("text-center email"));
+ row.append($("<td/>").addClass("text-center score"));
+ row.append($("<td/>").addClass("text-center edit"));
+    table.append(row);
+ return row;
+};
+
+var createStudentModal = function () {
+ var modal;
+ var createStudent = function () {
+ var id = modal.find(".id").val();
+ if (id === "") {
+ modal.modal('hide');
+ return;
+ }
+
+ var first_name = modal.find(".first-name").val();
+ if (first_name === "") first_name = null;
+
+ var last_name = modal.find(".last-name").val();
+ if (last_name === "") last_name = null;
+
+ var email = modal.find(".email").val();
+ if (email === "") email = null;
+
+ var model = new Student({
+ "id": id,
+ "first_name": first_name,
+ "last_name": last_name,
+ "email": email
+ }, {
+ "collection": models
+ });
+
+ var tbl = $("#main-table");
+ var row = insertRow(tbl);
+ var view = new StudentUI({
+ "model": model,
+ "el": row
+ });
+ views.push(view);
+ model.save();
+ tbl.parent().DataTable().row.add(row).draw();
+
+ modal.modal('hide');
+ };
+
+ var body = $("<table/>").addClass("table table-striped form-table");
+ var id = $("<tr/>");
+ body.append(id);
+ id.append($("<td/>").addClass("align-middle").text("Student ID"));
+ id.append($("<td/>").append($("<input/>").addClass("id").attr("type", "text")));
+
+ var first_name = $("<tr/>");
+ body.append(first_name);
+ first_name.append($("<td/>").addClass("align-middle").text("First name (optional)"));
+ first_name.append($("<td/>").append($("<input/>").addClass("first-name").attr("type", "text")));
+
+ var last_name = $("<tr/>");
+ body.append(last_name);
+ last_name.append($("<td/>").addClass("align-middle").text("Last name (optional)"));
+ last_name.append($("<td/>").append($("<input/>").addClass("last-name").attr("type", "text")));
+
+ var email = $("<tr/>");
+ body.append(email);
+ email.append($("<td/>").addClass("align-middle").text("Email (optional)"));
+ email.append($("<td/>").append($("<input/>").addClass("email").attr("type", "text")));
+
+ var footer = $("<div/>");
+ footer.append($("<button/>")
+ .addClass("btn btn-primary save")
+ .attr("type", "button")
+ .click(createStudent)
+ .text("Save"));
+ footer.append($("<button/>")
+ .addClass("btn btn-danger")
+ .attr("type", "button")
+ .attr("data-dismiss", "modal")
+ .text("Cancel"));
+
+ modal = createModal("add-student-modal", "Add New Student", body, footer);
+};
+
+var loadStudents = function () {
+ var tbl = $("#main-table");
+
+ models = new Students();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new StudentUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadStudents();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/manage_students_assignments.js b/nbgrader/server_extensions/formgrader/static/js/manage_students_assignments.js
new file mode 100644
index 00000000..00d0c798
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/manage_students_assignments.js
@@ -0,0 +1,125 @@
+var StudentSubmission = Backbone.Model.extend({});
+var StudentSubmissions = Backbone.Collection.extend({
+ model: StudentSubmission,
+ url: base_url + "/formgrader/api/student_submissions/" + student_id
+});
+
+var StudentSubmissionUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$name = this.$el.find(".name");
+ this.$score = this.$el.find(".score");
+ this.$code_score = this.$el.find(".code-score");
+ this.$written_score = this.$el.find(".written-score");
+ this.$needs_manual_grade = this.$el.find(".needs-manual-grade");
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$score.empty();
+ this.$code_score.empty();
+ this.$written_score.empty();
+ this.$needs_manual_grade.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // name
+ var name = this.model.get("name");
+ this.$name.attr("data-order", name);
+ if (!this.model.get("submitted")) {
+ this.$name.text(name + " (no submission)");
+ } else if (!this.model.get("autograded")) {
+ this.$name.text(name + " (not autograded)");
+ } else {
+ this.$name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/manage_students/" + student_id + "/" + name)
+ .text(name));
+ }
+
+ // score
+ var score = roundToPrecision(this.model.get("score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0);
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+
+ // code score
+ var code_score = roundToPrecision(this.model.get("code_score"), 2);
+ var max_code_score = roundToPrecision(this.model.get("max_code_score"), 2);
+ if (max_code_score === 0) {
+ this.$code_score.attr("data-order", 0.0);
+ } else {
+ this.$code_score.attr("data-order", code_score / max_code_score);
+ }
+ this.$code_score.text(code_score + " / " + max_code_score);
+
+ // written score
+ var written_score = roundToPrecision(this.model.get("written_score"), 2);
+ var max_written_score = roundToPrecision(this.model.get("max_written_score"), 2);
+ if (max_written_score === 0) {
+ this.$written_score.attr("data-order", 0.0);
+ } else {
+ this.$written_score.attr("data-order", written_score / max_written_score);
+ }
+ this.$written_score.text(written_score + " / " + max_written_score);
+
+ // needs manual grade?
+ if (this.model.get("needs_manual_grade")) {
+ this.$needs_manual_grade.attr("data-search", "needs manual grade");
+ this.$needs_manual_grade.attr("data-order", 1);
+ this.$needs_manual_grade.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$needs_manual_grade.attr("data-search", "");
+ this.$needs_manual_grade.attr("data-order", 0);
+ }
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center score"));
+ row.append($("<td/>").addClass("text-center code-score"));
+ row.append($("<td/>").addClass("text-center written-score"));
+ row.append($("<td/>").addClass("text-center needs-manual-grade"));
+    table.append(row);
+ return row;
+};
+
+var loadStudentSubmissions = function () {
+ var tbl = $("#main-table");
+
+ models = new StudentSubmissions();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new StudentSubmissionUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadStudentSubmissions();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/manage_students_notebook_submissions.js b/nbgrader/server_extensions/formgrader/static/js/manage_students_notebook_submissions.js
new file mode 100644
index 00000000..f1f087e1
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/manage_students_notebook_submissions.js
@@ -0,0 +1,151 @@
+var StudentSubmittedNotebook = Backbone.Model.extend({});
+var StudentSubmittedNotebooks = Backbone.Collection.extend({
+ model: StudentSubmittedNotebook,
+ url: base_url + "/formgrader/api/student_notebook_submissions/" + student_id + "/" + assignment_id
+});
+
+var StudentSubmittedNotebookUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$name = this.$el.find(".name");
+ this.$score = this.$el.find(".score");
+ this.$code_score = this.$el.find(".code-score");
+ this.$written_score = this.$el.find(".written-score");
+ this.$needs_manual_grade = this.$el.find(".needs-manual-grade");
+ this.$tests_failed = this.$el.find(".tests-failed");
+ this.$flagged = this.$el.find(".flagged");
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$name.empty();
+ this.$score.empty();
+ this.$code_score.empty();
+ this.$written_score.empty();
+ this.$needs_manual_grade.empty();
+ this.$tests_failed.empty();
+ this.$flagged.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ // notebook name
+ var name = this.model.get("name");
+ this.$name.attr("data-order", name);
+ if (this.model.get("id") === null) {
+ this.$name.append(name + " (file missing)");
+ } else {
+ this.$name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/submissions/" + this.model.get("id"))
+ .text(name));
+ }
+
+ // score
+ var score = roundToPrecision(this.model.get("score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0);
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+
+ // code score
+ score = roundToPrecision(this.model.get("code_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_code_score"), 2);
+ if (max_score === 0) {
+ this.$code_score.attr("data-order", 0.0);
+ } else {
+ this.$code_score.attr("data-order", score / max_score);
+ }
+ this.$code_score.text(score + " / " + max_score);
+
+ // written score
+ score = roundToPrecision(this.model.get("written_score"), 2);
+ max_score = roundToPrecision(this.model.get("max_written_score"), 2);
+ if (max_score === 0) {
+ this.$written_score.attr("data-order", 0.0);
+ } else {
+ this.$written_score.attr("data-order", score / max_score);
+ }
+ this.$written_score.text(score + " / " + max_score);
+
+ // needs manual grade?
+ if (this.model.get("needs_manual_grade")) {
+ this.$needs_manual_grade.attr("data-search", "needs manual grade");
+ this.$needs_manual_grade.attr("data-order", 1);
+ this.$needs_manual_grade.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$needs_manual_grade.attr("data-search", "");
+ this.$needs_manual_grade.attr("data-order", 0);
+ }
+
+ // tests failed?
+ if (this.model.get("failed_tests")) {
+ this.$tests_failed.attr("data-search", "tests failed");
+ this.$tests_failed.attr("data-order", 1);
+ this.$tests_failed.append($("<span/>")
+ .addClass("glyphicon glyphicon-ok"));
+ } else {
+ this.$tests_failed.attr("data-search", "");
+ this.$tests_failed.attr("data-order", 0);
+ }
+
+ // flagged?
+ if (this.model.get("flagged")) {
+ this.$flagged.attr("data-search", "flagged");
+ this.$flagged.attr("data-order", 1);
+ this.$flagged.append($("<span/>")
+ .addClass("glyphicon glyphicon-flag"));
+ } else {
+ this.$flagged.attr("data-search", "");
+ this.$flagged.attr("data-order", 0);
+ }
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("name"));
+ row.append($("<td/>").addClass("text-center score"));
+ row.append($("<td/>").addClass("text-center code-score"));
+ row.append($("<td/>").addClass("text-center written-score"));
+ row.append($("<td/>").addClass("text-center needs-manual-grade"));
+ row.append($("<td/>").addClass("text-center tests-failed"));
+ row.append($("<td/>").addClass("text-center flagged"));
+    table.append(row);
+ return row;
+};
+
+var loadStudentSubmittedNotebooks = function () {
+ var tbl = $("#main-table");
+
+ models = new StudentSubmittedNotebooks();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new StudentSubmittedNotebookUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadStudentSubmittedNotebooks();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/manage_submissions.js b/nbgrader/server_extensions/formgrader/static/js/manage_submissions.js
new file mode 100644
index 00000000..2b2b40d0
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/manage_submissions.js
@@ -0,0 +1,194 @@
+var Submission = Backbone.Model.extend({
+ idAttribute: 'student',
+ urlRoot: base_url + "/formgrader/api/submission/" + assignment_id
+});
+
+var Submissions = Backbone.Collection.extend({
+ model: Submission,
+ url: base_url + "/formgrader/api/submissions/" + assignment_id
+});
+
+var SubmissionUI = Backbone.View.extend({
+
+ events: {},
+
+ initialize: function () {
+ this.$student_name = this.$el.find(".student-name");
+ this.$student_id = this.$el.find(".student-id");
+ this.$timestamp = this.$el.find(".timestamp");
+ this.$status = this.$el.find(".status");
+ this.$score = this.$el.find(".score");
+ this.$autograde = this.$el.find(".autograde");
+
+ this.listenTo(this.model, "sync", this.render);
+
+ this.render();
+ },
+
+ clear: function () {
+ this.$student_name.empty();
+ this.$student_id.empty();
+ this.$timestamp.empty();
+ this.$status.empty();
+ this.$score.empty();
+ this.$autograde.empty();
+ },
+
+ render: function () {
+ this.clear();
+
+ var student = this.model.get("student");
+ var assignment = this.model.get("name");
+
+ // student name
+ var last_name = this.model.get("last_name");
+ var first_name = this.model.get("first_name");
+ if (last_name === null) last_name = "None";
+ if (first_name === null) first_name = "None";
+ var name = last_name + ", " + first_name;
+ this.$student_name.attr("data-order", name);
+ if (this.model.get("autograded")) {
+ this.$student_name.append($("<a/>")
+ .attr("href", base_url + "/formgrader/manage_students/" + student + "/" + assignment)
+ .text(name));
+ } else {
+ this.$student_name.text(name);
+ }
+
+ // student id
+ this.$student_id.attr("data-order", student);
+ this.$student_id.text(student);
+
+ // timestamp
+ var timestamp = this.model.get("timestamp");
+ var display_timestamp = this.model.get("display_timestamp");
+ if (timestamp === null) {
+ timestamp = "None";
+ display_timestamp = "None";
+ }
+ this.$timestamp.attr("data-order", timestamp);
+ this.$timestamp.text(display_timestamp);
+
+ // status
+ if (!this.model.get("autograded")) {
+ this.$status.attr("data-order", 0);
+ this.$status.append($("<span/>")
+ .addClass("label label-warning")
+ .text("needs autograding"));
+ } else if (this.model.get("needs_manual_grade")) {
+ this.$status.attr("data-order", 1);
+ this.$status.append($("<span/>")
+ .addClass("label label-info")
+ .text("needs manual grading"));
+ } else {
+ this.$status.attr("data-order", 2);
+ this.$status.append($("<span/>")
+ .addClass("label label-success")
+ .text("graded"));
+ }
+
+ // score
+ if (this.model.get("autograded")) {
+ var score = roundToPrecision(this.model.get("score"), 2);
+ var max_score = roundToPrecision(this.model.get("max_score"), 2);
+ if (max_score === 0) {
+ this.$score.attr("data-order", 0.0);
+ } else {
+ this.$score.attr("data-order", score / max_score);
+ }
+ this.$score.text(score + " / " + max_score);
+ } else {
+ this.$score.attr("data-order", 0.0);
+ }
+
+ // autograde
+ this.$autograde.append($("<a/>")
+ .attr("href", "#")
+ .click(_.bind(this.autograde, this))
+ .append($("<span/>")
+ .addClass("glyphicon glyphicon-flash")
+ .attr("aria-hidden", "true")));
+ },
+
+ autograde: function () {
+ this.clear();
+ this.$student_name.text("Please wait...");
+ var student = this.model.get("student");
+ var assignment = this.model.get("name");
+ $.post(base_url + "/formgrader/api/submission/" + assignment + "/" + student + "/autograde")
+ .done(_.bind(this.autograde_success, this))
+ .fail(_.bind(this.autograde_failure, this));
+ },
+
+ autograde_success: function (response) {
+ this.model.fetch();
+ response = JSON.parse(response);
+ var student = this.model.get("student");
+ var assignment = this.model.get("name");
+ if (response["success"]) {
+ createLogModal(
+ "success-modal",
+ "Success",
+ "Successfully autograded '" + assignment + "' for student '" + student + "'.",
+ response["log"]);
+
+ } else {
+ createLogModal(
+ "error-modal",
+ "Error",
+ "There was an error autograding '" + assignment + "' for student '" + student + "':",
+ response["log"],
+ response["error"]);
+ }
+ },
+
+ autograde_failure: function (response) {
+ this.model.fetch();
+ var student = this.model.get("student");
+ var assignment = this.model.get("name");
+ createModal(
+ "error-modal",
+ "Error",
+ "There was an error autograding '" + assignment + "' for student '" + student + "'.");
+ },
+});
+
+var insertRow = function (table) {
+ var row = $("<tr/>");
+ row.append($("<td/>").addClass("student-name"));
+ row.append($("<td/>").addClass("text-center student-id"));
+ row.append($("<td/>").addClass("text-center timestamp"));
+ row.append($("<td/>").addClass("text-center status"));
+ row.append($("<td/>").addClass("text-center score"));
+ row.append($("<td/>").addClass("text-center autograde"));
+    table.append(row);
+ return row;
+};
+
+var loadSubmissions = function () {
+ var tbl = $("#main-table");
+
+ models = new Submissions();
+ views = [];
+ models.loaded = false;
+ models.fetch({
+ success: function () {
+ tbl.empty();
+ models.each(function (model) {
+ var view = new SubmissionUI({
+ "model": model,
+ "el": insertRow(tbl)
+ });
+ views.push(view);
+ });
+ insertDataTable(tbl.parent());
+ models.loaded = true;
+ }
+ });
+};
+
+var models = undefined;
+var views = [];
+$(window).load(function () {
+ loadSubmissions();
+});
diff --git a/nbgrader/server_extensions/formgrader/static/js/utils.js b/nbgrader/server_extensions/formgrader/static/js/utils.js
new file mode 100644
index 00000000..e5adf5fa
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/static/js/utils.js
@@ -0,0 +1,89 @@
+var createModal = function (id, title, body, footer) {
+ var modal = $("<div/>")
+ .addClass("modal")
+ .addClass("fade")
+ .attr("id", id)
+ .attr("role", "dialog")
+
+ var dialog = $("<div/>").addClass("modal-dialog");
+ modal.append(dialog);
+
+ var content = $("<div/>").addClass("modal-content");
+ dialog.append(content);
+
+ var header = $("<div/>").addClass("modal-header");
+ content.append(header);
+ header.append($("<button/>")
+ .addClass("close")
+ .attr("data-dismiss", "modal")
+ .attr("aria-label", "Close")
+ .append($("<span/>")
+ .attr("aria-hidden", "true")
+ .html("×")));
+ header.append($("<h4/>")
+ .addClass("modal-title")
+ .text(title));
+
+ content.append($("<div/>").addClass("modal-body").append(body));
+
+ if (!footer) {
+ footer = $("<div/>");
+ footer.append($("<button/>")
+ .addClass("btn btn-primary close")
+ .attr("type", "button")
+ .attr("data-dismiss", "modal")
+ .text("OK"));
+ }
+ content.append($("<div/>").addClass("modal-footer").append(footer));
+
+ // remove the modal on close
+ modal.on("hidden.bs.modal", function () {
+ modal.remove();
+ });
+
+ $("body").append(modal);
+ modal.modal();
+ return modal;
+};
+
+var createLogModal = function (id, title, message, log, error) {
+ var body = $("<div>");
+ body.append($("<p/>").text(message));
+
+ if (log) {
+ var log_panel = $("<div/>").addClass("panel panel-warning");
+ log_panel.append($("<div/>").addClass("panel-heading").text("Log Output"));
+ log_panel.append($("<div/>")
+ .addClass("panel-body")
+ .append($("<pre/>").text(log)));
+ body.append(log_panel);
+ }
+
+ if (error) {
+ var err_panel = $("<div/>").addClass("panel panel-danger");
+ err_panel.append($("<div/>").addClass("panel-heading").text("Traceback"));
+ err_panel.append($("<div/>")
+ .addClass("panel-body")
+ .append($("<pre/>").text(error)));
+ body.append(err_panel);
+ }
+
+ return createModal(id, title, body);
+};
+
+var roundToPrecision = function (num, precision) {
+ var factor = Math.pow(10, precision);
+ return Math.round(num * factor) / factor;
+};
+
+var insertDataTable = function (tbl) {
+ tbl.DataTable({
+ info: false,
+ paging: false,
+        stateSave: true,
+ columnDefs: [{
+ "targets": "no-sort",
+ "orderable": false
+ }]
+ });
+};
diff --git a/nbgrader/server_extensions/formgrader/templates/assignment_notebooks.tpl b/nbgrader/server_extensions/formgrader/templates/assignment_notebooks.tpl
deleted file mode 100644
index 25feab24..00000000
--- a/nbgrader/server_extensions/formgrader/templates/assignment_notebooks.tpl
+++ /dev/null
@@ -1,61 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block breadcrumb -%}
-<li><a href="{{ base_url }}/formgrader/assignments">Assignments</a></li>
-<li class="active">{{ assignment.name }}</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists the notebooks that are associated with the
- assignment "{{ assignment.name }}". Click on a notebook
- name to see the list of student submissions for that notebook.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th>Notebook ID</th>
- <th class="center">Avg. Score</th>
- <th class="center">Avg. Code Score</th>
- <th class="center">Avg. Written Score</th>
- <th class="center">Needs manual grade?</th>
- </tr>
-</thead>
-<tbody>
- {%- for notebook in notebooks -%}
- <tr>
- <td><a href="{{ base_url }}/formgrader/assignments/{{ assignment.name }}/{{ notebook.name }}">{{ notebook.name }}</a></td>
- {%- if notebook.max_score is greaterthan 0 -%}
- <td data-order="{{ notebook.average_score / notebook.max_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ notebook.average_score | float | round(2) }} / {{ notebook.max_score | float | round(2) }}
- </td>
- {%- if notebook.max_code_score is greaterthan 0 -%}
- <td data-order="{{ notebook.average_code_score / notebook.max_code_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ notebook.average_code_score | float | round(2) }} / {{ notebook.max_code_score | float | round(2) }}
- </td>
- {%- if notebook.max_written_score is greaterthan 0 -%}
- <td data-order="{{ notebook.average_written_score / notebook.max_written_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ notebook.average_written_score | float | round(2) }} / {{ notebook.max_written_score | float | round(2) }}
- </td>
- {%- if notebook.needs_manual_grade -%}
- <td data-search="needs manual grade" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- {%- else -%}
- <td data-search="" class="center">
- {%- endif -%}
- </td>
- </tr>
- {%- endfor -%}
-</tbody>
-{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/assignments.tpl b/nbgrader/server_extensions/formgrader/templates/assignments.tpl
deleted file mode 100644
index 2c5db59b..00000000
--- a/nbgrader/server_extensions/formgrader/templates/assignments.tpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block breadcrumb -%}
-<li class="active">Assignments</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists all of the assignments that have been
- added to the gradebook. Click on the name of an assignment to see
- the notebooks that are associated with that assignment.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th>Assignment ID</th>
- <th class="center">Due date</th>
- <th class="center">Submissions</th>
- <th class="center">Avg score</th>
- <th class="center">Max score</th>
- </tr>
-</thead>
-<tbody>
- {%- for assignment in assignments -%}
- <tr>
- <td><a href="{{ base_url }}/formgrader/assignments/{{ assignment.name }}">{{ assignment.name }}</a></td>
- <td class="center">{{ assignment.duedate }}</td>
- <td class="center">{{ assignment.num_submissions }}</td>
- <td class="center">{{ assignment.average_score | float | round(2) }}</td>
- <td class="center">{{ assignment.max_score | float | round(2) }}</td>
- </tr>
- {%- endfor -%}
-</tbody>
-{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/base.tpl b/nbgrader/server_extensions/formgrader/templates/base.tpl
new file mode 100644
index 00000000..93866c2e
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/base.tpl
@@ -0,0 +1,79 @@
+<!doctype html>
+<head>
+ <title>nbgrader formgrade</title>
+
+ <script src="{{ base_url }}/formgrader/static/components/jquery/jquery.min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/components/underscore/underscore-min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/components/backbone/backbone-min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/components/bootstrap/js/bootstrap.min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/components/datatables.net/js/jquery.dataTables.min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/components/datatables.net-bs/js/dataTables.bootstrap.min.js"></script>
+ <script src="{{ base_url }}/formgrader/static/js/backbone_xsrf.js"></script>
+ <script src="{{ base_url }}/formgrader/static/js/utils.js"></script>
+
+ <link rel="stylesheet" href="{{ base_url }}/formgrader/static/components/bootstrap/css/bootstrap.min.css" />
+ <link rel="stylesheet" href="{{ base_url }}/formgrader/static/components/datatables.net-bs/css/dataTables.bootstrap.min.css">
+ <link rel="stylesheet" href="{{ base_url }}/formgrader/static/css/nbgrader.css">
+
+ <script>
+ var base_url = "{{ base_url }}";
+ </script>
+
+ {%- block head -%}
+ {%- endblock -%}
+</head>
+
+<body>
+ <div class="container-fluid">
+ <div class="row">
+ <div class="col-md-2">
+ <div class="page-header">
+ <h1>nbgrader</h1>
+ </div>
+ </div>
+ <div class="col-md-10">
+ <div class="page-header">
+ <h1>
+ {%- block title -%}
+ {%- endblock -%}
+ </h1>
+ </div>
+ </div>
+ </div>
+ <div class="row">
+ <div class="col-md-2">
+ <ul class="nav nav-pills nav-stacked">
+ {%- block sidebar -%}
+ <li role="presentation"><a href="{{ base_url }}/formgrader/manage_assignments">Manage Assignments</a></li>
+ <li role="presentation"><a href="{{ base_url }}/formgrader/gradebook">Gradebook</a></li>
+ <li role="presentation"><a href="{{ base_url }}/formgrader/manage_students">Manage Students</a></li>
+ {%- endblock -%}
+ </ul>
+ </div>
+ <div class="col-md-10">
+ {%- block body -%}
+ {%- block breadcrumbs -%}
+ {%- endblock -%}
+ {%- block messages -%}
+ {%- endblock -%}
+ <table class="table table-hover">
+ <thead>
+ {%- block table_header -%}
+ {%- endblock -%}
+ </thead>
+ <tbody id="main-table">
+ {%- block table_body -%}
+ {%- endblock -%}
+ </tbody>
+ <tfoot>
+ {%- block table_footer -%}
+ {%- endblock -%}
+ </tfoot>
+ </table>
+ {%- endblock -%}
+ </div>
+ </div>
+ </div>
+ {%- block script -%}
+ {%- endblock -%}
+</body>
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_403.tpl b/nbgrader/server_extensions/formgrader/templates/base_403.tpl
similarity index 60%
rename from nbgrader/server_extensions/formgrader/templates/gradebook_403.tpl
rename to nbgrader/server_extensions/formgrader/templates/base_403.tpl
index 0183d575..6b29da31 100644
--- a/nbgrader/server_extensions/formgrader/templates/gradebook_403.tpl
+++ b/nbgrader/server_extensions/formgrader/templates/base_403.tpl
@@ -1,4 +1,8 @@
-{%- extends 'gradebook.tpl' -%}
+{%- extends 'base.tpl' -%}
+
+{%- block title -%}
+Not Authorized
+{%- endblock -%}
{%- block body -%}
<div class="panel-body">
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_500.tpl b/nbgrader/server_extensions/formgrader/templates/base_500.tpl
similarity index 71%
rename from nbgrader/server_extensions/formgrader/templates/gradebook_500.tpl
rename to nbgrader/server_extensions/formgrader/templates/base_500.tpl
index 382d4f0e..6dfe7789 100644
--- a/nbgrader/server_extensions/formgrader/templates/gradebook_500.tpl
+++ b/nbgrader/server_extensions/formgrader/templates/base_500.tpl
@@ -1,4 +1,8 @@
-{%- extends 'gradebook.tpl' -%}
+{%- extends 'base.tpl' -%}
+
+{%- block title -%}
+Error
+{%- endblock -%}
{%- block body -%}
<div class="panel-body">
diff --git a/nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl b/nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl
index a96e3d62..f6bc8f0e 100644
--- a/nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl
+++ b/nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl
@@ -16,8 +16,9 @@ var assignment_id = "{{ resources.assignment_id }}";
var base_url = "{{ resources.base_url }}/formgrader";
</script>
-<script src="{{ resources.base_url }}/formgrader/static/js/keyboardmanager.js"></script>
-<script src="{{ resources.base_url }}/formgrader/static/js/models.js"></script>
+<script src="{{ resources.base_url }}/formgrader/static/js/backbone_xsrf.js"></script>
+<script src="{{ resources.base_url }}/formgrader/static/js/formgrade_keyboardmanager.js"></script>
+<script src="{{ resources.base_url }}/formgrader/static/js/formgrade_models.js"></script>
<script src="{{ resources.base_url }}/formgrader/static/js/formgrade.js"></script>
<script type="text/javascript">
function toggle_name(on) {
@@ -51,9 +52,9 @@ function toggle_name(on) {
<div class="col-md-8">
<ul class="nav text-center">
<ul class="breadcrumb">
- <li><a href="{{ resources.base_url }}/formgrader/assignments">Assignments</a></li>
- <li><a href="{{ resources.base_url }}/formgrader/assignments/{{ resources.assignment_id }}">{{ resources.assignment_id }}</a></li>
- <li><a href="{{ resources.base_url }}/formgrader/assignments/{{ resources.assignment_id }}/{{ resources.notebook_id }}">{{ resources.notebook_id }}</a></li>
+ <li><a href="{{ resources.base_url }}/formgrader/gradebook">Manual Grading</a></li>
+ <li><a href="{{ resources.base_url }}/formgrader/gradebook/{{ resources.assignment_id }}">{{ resources.assignment_id }}</a></li>
+ <li><a href="{{ resources.base_url }}/formgrader/gradebook/{{ resources.assignment_id }}/{{ resources.notebook_id }}">{{ resources.notebook_id }}</a></li>
<li class="active live-notebook">
<a class="name-hidden" data-toggle="tooltip" data-placement="bottom" title="Open live notebook" target="_blank" href="{{ resources.base_url }}/notebooks/{{ resources.notebook_path }}">
Submission #{{ resources.index + 1 }}
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook.tpl b/nbgrader/server_extensions/formgrader/templates/gradebook.tpl
deleted file mode 100644
index 24cd2ee0..00000000
--- a/nbgrader/server_extensions/formgrader/templates/gradebook.tpl
+++ /dev/null
@@ -1,66 +0,0 @@
-<!doctype html>
-<head>
- <title>nbgrader formgrade</title>
-
- <script src="{{ base_url }}/formgrader/static/components/jquery/jquery.min.js"></script>
- <script src="{{ base_url }}/formgrader/static/components/underscore/underscore-min.js"></script>
- <script src="{{ base_url }}/formgrader/static/components/backbone/backbone-min.js"></script>
- <script src="{{ base_url }}/formgrader/static/components/bootstrap/js/bootstrap.min.js"></script>
- <script src="{{ base_url }}/formgrader/static/components/datatables.net/js/jquery.dataTables.min.js"></script>
- <script src="{{ base_url }}/formgrader/static/components/datatables.net-bs/js/dataTables.bootstrap.min.js"></script>
-
- <link rel="stylesheet" href="{{ base_url }}/formgrader/static/components/bootstrap/css/bootstrap.min.css" />
- <link rel="stylesheet" href="{{ base_url }}/formgrader/static/components/datatables.net-bs/css/dataTables.bootstrap.min.css">
- <link rel="stylesheet" href="{{ base_url }}/formgrader/static/css/formgrade.css">
-
- {%- block head -%}
- {%- endblock -%}
-</head>
-
-<body>
- <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
- <div class="container">
- <div class="navbar-header">
- <a class="navbar-brand" href="#">nbgrader formgrade</a>
- </div>
- <div>
- <ul class="nav navbar-nav navbar-left">
- <ul class="breadcrumb">
- {%- block breadcrumb -%}
- {%- endblock -%}
- </ul>
- </ul>
- <ul class="nav navbar-nav navbar-right">
- <li class="dropdown">
- <a href="#" class="dropdown-toggle" data-toggle="dropdown">Change View <b class="caret"></b></a>
- <ul class="dropdown-menu">
- <li><a href="{{ base_url }}/formgrader/assignments">Assignments</a></li>
- <li><a href="{{ base_url }}/formgrader/students">Students</a></li>
- </ul>
- </li>
- </ul>
- </div>
- </div>
- </nav>
- <div class="container" id="gradebook">
- <div class="panel panel-default">
- {%- block body -%}
- <div class="panel-body"></div>
- {%- endblock -%}
- <table id="formgrade-table" class="table table-hover">
- {%- block table -%}
- {%- endblock -%}
- </table>
- </div>
- </div>
- <script type="text/javascript">
- $(document).ready(function(){
- $('#formgrade-table').DataTable({
- info: false,
- paging: false,
- saveState: true,
- });
- });
- </script>
-</body>
-
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_assignments.tpl b/nbgrader/server_extensions/formgrader/templates/gradebook_assignments.tpl
new file mode 100644
index 00000000..06390b49
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/gradebook_assignments.tpl
@@ -0,0 +1,24 @@
+{%- extends 'gradebook_base.tpl' -%}
+
+{%- block head -%}
+<script src="{{ base_url }}/formgrader/static/js/gradebook_assignments.js"></script>
+{%- endblock -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li class="active">Manual Grading</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Assignment ID</th>
+ <th class="text-center">Due Date</th>
+ <th class="text-center">Submissions</th>
+ <th class="text-center">Score</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="4">Loading, please wait...</td></tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_base.tpl b/nbgrader/server_extensions/formgrader/templates/gradebook_base.tpl
new file mode 100644
index 00000000..e280d731
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/gradebook_base.tpl
@@ -0,0 +1,11 @@
+{%- extends 'base.tpl' -%}
+
+{%- block title -%}
+Manual Grading
+{%- endblock -%}
+
+{%- block sidebar -%}
+<li role="presentation"><a href="{{ base_url }}/formgrader/manage_assignments">Manage Assignments</a></li>
+<li role="presentation" class="active"><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/manage_students">Manage Students</a></li>
+{%- endblock -%}
\ No newline at end of file
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_notebook_submissions.tpl b/nbgrader/server_extensions/formgrader/templates/gradebook_notebook_submissions.tpl
new file mode 100644
index 00000000..385bc76a
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/gradebook_notebook_submissions.tpl
@@ -0,0 +1,35 @@
+{%- extends 'gradebook_base.tpl' -%}
+
+{%- block head -%}
+<script>
+var assignment_id = "{{ assignment_id }}";
+var notebook_id = "{{ notebook_id }}";
+</script>
+
+<script src="{{ base_url }}/formgrader/static/js/gradebook_notebook_submissions.js"></script>
+{%- endblock head -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+ <li><a href="{{ base_url }}/formgrader/gradebook/{{ assignment_id }}">{{ assignment_id }}</a></li>
+ <li class="active">{{ notebook_id }}</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th></th>
+ <th>Submission ID</th>
+ <th class="text-center">Overall Score</th>
+ <th class="text-center">Code Score</th>
+ <th class="text-center">Written Score</th>
+ <th class="text-center">Needs Manual Grade?</th>
+ <th class="text-center">Tests Failed?</th>
+ <th class="text-center">Flagged?</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="7">Loading, please wait...</td></tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/gradebook_notebooks.tpl b/nbgrader/server_extensions/formgrader/templates/gradebook_notebooks.tpl
new file mode 100644
index 00000000..75312048
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/gradebook_notebooks.tpl
@@ -0,0 +1,30 @@
+{%- extends 'gradebook_base.tpl' -%}
+
+{%- block head -%}
+<script>
+var assignment_id = "{{ assignment_id }}";
+</script>
+
+<script src="{{ base_url }}/formgrader/static/js/gradebook_notebooks.js"></script>
+{%- endblock head -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+ <li class="active">{{ assignment_id }}</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Notebook ID</th>
+ <th class="text-center">Avg. Score</th>
+ <th class="text-center">Avg. Code Score</th>
+ <th class="text-center">Avg. Written Score</th>
+ <th class="text-center">Needs Manual Grade?</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="5">Loading, please wait...</td></tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_assignments.tpl b/nbgrader/server_extensions/formgrader/templates/manage_assignments.tpl
new file mode 100644
index 00000000..a74e7c3d
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_assignments.tpl
@@ -0,0 +1,79 @@
+{%- extends 'base.tpl' -%}
+
+{%- block head -%}
+<script src="{{ base_url }}/formgrader/static/js/manage_assignments.js"></script>
+{%- endblock -%}
+
+{%- block title -%}
+Manage Assignments
+{%- endblock -%}
+
+{%- block sidebar -%}
+<li role="presentation" class="active"><a href="{{ base_url }}/formgrader/manage_assignments">Manage Assignments</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/manage_students">Manage Students</a></li>
+{%- endblock -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li class="active">Assignments</li>
+</ol>
+{%- endblock -%}
+
+{%- block messages -%}
+<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
+ <div class="panel panel-default">
+ <div class="panel-heading" role="tab" id="headingOne">
+ <h4 class="panel-title">
+ <a class="collapsed" role="button" data-toggle="collapse" data-parent="#accordion" href="#collapseOne" aria-expanded="false" aria-controls="collapseOne">
+ Instructions (click to expand)
+ </a>
+ </h4>
+ </div>
+ <div id="collapseOne" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingOne">
+ <div class="panel-body">
+ <ol>
+ <li>To <b>create</b> an assignment, click the "Add new assignment..." button below.</li>
+ <li>To <b>edit assignment files</b>, click on the name of an assignment.</li>
+ <li>To <b>edit the assignment metadata</b>, click on the edit button.</li>
+ <li>To <b>generate</b> the student version of an assignment, click on the generate button.</li>
+ <li>To <b>preview</b> the student version of an assignment, click on the preview button.</li>
+ <li><i>(JupyterHub only)</i> To <b>release</b> the assignment to students, click the release button.
+ You can "unrelease" an assignment by clicking again, though note some students may have
+ already accessed the assignment.</li>
+ <li><i>(JupyterHub only)</i> To <b>collect</b> assignments, click the collect button.</li>
+ <li>To <b>autograde</b> submissions, click on the number of collected submissions. You must run
+ the autograder on the submissions before you can manually grade them.</li>
+ </ol>
+ </div>
+ </div>
+ </div>
+</div>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Name</th>
+ <th class="text-center">Due Date</th>
+ <th class="text-center">Status</th>
+ <th class="text-center no-sort">Edit</th>
+ <th class="text-center no-sort">Generate</th>
+ <th class="text-center no-sort">Preview</th>
+ <th class="text-center no-sort">Release</th>
+ <th class="text-center no-sort">Collect</th>
+ <th class="text-center"># Submissions</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="9">Loading, please wait...</td></tr>
+{%- endblock -%}
+
+{%- block table_footer -%}
+<tr>
+ <td colspan="9">
+ <span class="glyphicon glyphicon-plus" aria-hidden="true"></span>
+ <a href="#" onClick="createAssignmentModal();">Add new assignment...</a>
+ </td>
+</tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_students.tpl b/nbgrader/server_extensions/formgrader/templates/manage_students.tpl
new file mode 100644
index 00000000..09de6552
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_students.tpl
@@ -0,0 +1,34 @@
+{%- extends 'manage_students_base.tpl' -%}
+
+{%- block head -%}
+<script src="{{ base_url }}/formgrader/static/js/manage_students.js"></script>
+{%- endblock -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li class="active">Students</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Name</th>
+ <th class="text-center">Student ID</th>
+ <th class="text-center">Email</th>
+ <th class="text-center">Overall Score</th>
+ <th class="text-center no-sort">Edit Student</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="5">Loading, please wait...</td></tr>
+{%- endblock -%}
+
+{%- block table_footer -%}
+<tr>
+ <td colspan="5">
+ <span class="glyphicon glyphicon-plus" aria-hidden="true"></span>
+ <a href="#" onClick="createStudentModal();">Add new student...</a>
+ </td>
+</tr>
+{%- endblock -%}
\ No newline at end of file
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_students_assignments.tpl b/nbgrader/server_extensions/formgrader/templates/manage_students_assignments.tpl
new file mode 100644
index 00000000..b3120755
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_students_assignments.tpl
@@ -0,0 +1,30 @@
+{%- extends 'manage_students_base.tpl' -%}
+
+{%- block head -%}
+<script>
+var student_id = "{{ student_id }}";
+</script>
+
+<script src="{{ base_url }}/formgrader/static/js/manage_students_assignments.js"></script>
+{%- endblock head -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li><a href="{{ base_url }}/formgrader/manage_students">Students</a></li>
+ <li class="active">{{ student_id }}</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Assignment ID</th>
+ <th class="text-center">Overall Score</th>
+ <th class="text-center">Code Score</th>
+ <th class="text-center">Written Score</th>
+ <th class="text-center">Needs Manual Grade?</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="5">Loading, please wait...</td></tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_students_base.tpl b/nbgrader/server_extensions/formgrader/templates/manage_students_base.tpl
new file mode 100644
index 00000000..86ecf339
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_students_base.tpl
@@ -0,0 +1,11 @@
+{%- extends 'base.tpl' -%}
+
+{%- block title -%}
+Manage Students
+{%- endblock -%}
+
+{%- block sidebar -%}
+<li role="presentation"><a href="{{ base_url }}/formgrader/manage_assignments">Manage Assignments</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+<li role="presentation" class="active"><a href="{{ base_url }}/formgrader/manage_students">Manage Students</a></li>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_students_notebook_submissions.tpl b/nbgrader/server_extensions/formgrader/templates/manage_students_notebook_submissions.tpl
new file mode 100644
index 00000000..4ac51e4a
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_students_notebook_submissions.tpl
@@ -0,0 +1,34 @@
+{%- extends 'manage_students_base.tpl' -%}
+
+{%- block head -%}
+<script>
+var student_id = "{{ student_id }}";
+var assignment_id = "{{ assignment_id }}";
+</script>
+
+<script src="{{ base_url }}/formgrader/static/js/manage_students_notebook_submissions.js"></script>
+{%- endblock head -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li><a href="{{ base_url }}/formgrader/manage_students">Students</a></li>
+ <li><a href="{{ base_url }}/formgrader/manage_students/{{ student_id }}">{{ student_id }}</a></li>
+ <li class="active">{{ assignment_id }}</li>
+</ol>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Notebook ID</th>
+ <th class="text-center">Overall Score</th>
+ <th class="text-center">Code Score</th>
+ <th class="text-center">Written Score</th>
+ <th class="text-center">Needs manual grade?</th>
+ <th class="text-center">Tests failed?</th>
+ <th class="text-center">Flagged?</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr><td colspan="7">Loading, please wait...</td></tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/manage_submissions.tpl b/nbgrader/server_extensions/formgrader/templates/manage_submissions.tpl
new file mode 100644
index 00000000..3c96e4dd
--- /dev/null
+++ b/nbgrader/server_extensions/formgrader/templates/manage_submissions.tpl
@@ -0,0 +1,77 @@
+{%- extends 'base.tpl' -%}
+
+{%- block head -%}
+<script>
+var assignment_id = "{{ assignment_id }}";
+</script>
+
+<script src="{{ base_url }}/formgrader/static/js/manage_submissions.js"></script>
+{%- endblock head -%}
+
+{%- block title -%}
+Manage Submissions
+{%- endblock -%}
+
+{%- block sidebar -%}
+<li role="presentation" class="active"><a href="{{ base_url }}/formgrader/manage_assignments">Manage Assignments</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/gradebook">Manual Grading</a></li>
+<li role="presentation"><a href="{{ base_url }}/formgrader/manage_students">Manage Students</a></li>
+{%- endblock -%}
+
+{%- block breadcrumbs -%}
+<ol class="breadcrumb">
+ <li><a href="{{ base_url }}/formgrader/manage_assignments">Assignments</a></li>
+ <li class="active">{{ assignment_id }}</li>
+</ol>
+{%- endblock -%}
+
+{%- block messages -%}
+<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
+ <div class="panel panel-default">
+ <div class="panel-heading" role="tab" id="headingOne">
+ <h4 class="panel-title">
+ <a class="collapsed" role="button" data-toggle="collapse" data-parent="#accordion" href="#collapseOne" aria-expanded="false" aria-controls="collapseOne">
+ Instructions (click to expand)
+ </a>
+ </h4>
+ </div>
+ <div id="collapseOne" class="panel-collapse collapse" role="tabpanel" aria-labelledby="headingOne">
+ <div class="panel-body">
+ <p>
+ <b>Note:</b> Here you can autograde individual students' submissions by
+ clicking on the autograde icons below. If you want to autograde
+ all submissions at once, you will need to do this via the
+ <a target="_blank" href="{{ base_url }}/terminals/1">command line</a>:
+ </p>
+ <p>
+ <pre>
+cd "{{ course_dir }}"
+nbgrader autograde "{{ assignment_id }}"</pre>
+ </p>
+ </div>
+ </div>
+ </div>
+</div>
+{%- endblock -%}
+
+{%- block table_header -%}
+<tr>
+ <th>Student Name</th>
+ <th class="text-center">Student ID</th>
+ <th class="text-center">Timestamp</th>
+ <th class="text-center">Status</th>
+ <th class="text-center">Score</th>
+ <th class="text-center no-sort">Autograde</th>
+</tr>
+{%- endblock -%}
+
+{%- block table_body -%}
+<tr>
+ <td>Loading, please wait...</td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+</tr>
+{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/notebook_submissions.tpl b/nbgrader/server_extensions/formgrader/templates/notebook_submissions.tpl
deleted file mode 100644
index b9714bda..00000000
--- a/nbgrader/server_extensions/formgrader/templates/notebook_submissions.tpl
+++ /dev/null
@@ -1,97 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block head -%}
-<script type="text/javascript">
-function toggle_name(on, index) {
- var elem = $("#submission-" + index);
- if (on) {
- elem.find(".name-shown").show();
- elem.find(".name-hidden").hide();
- } else {
- elem.find(".name-hidden").show();
- elem.find(".name-shown").hide();
- }
-}
-</script>
-{%- endblock -%}
-
-{%- block breadcrumb -%}
-<li><a href="{{ base_url }}/formgrader/assignments">Assignments</a></li>
-<li><a href="{{ base_url }}/formgrader/assignments/{{ assignment_id }}">{{ assignment_id }}</a></li>
-<li class="active">{{ notebook_id }}</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists all the student submissions for the
- notebook "{{ notebook_id }}", which is part of the assignment "{{
- assignment_id }}". By clicking on a submission id, you
- can grade the submitted notebook.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th></th>
- <th>Submission ID</th>
- <th class="center">Overall Score</th>
- <th class="center">Code Score</th>
- <th class="center">Written Score</th>
- <th class="center">Needs manual grade?</th>
- <th class="center">Tests failed?</th>
- <th class="center">Flagged?</th>
- </tr>
-</thead>
-<tbody>
- {%- for submission in submissions -%}
- <tr id="submission-{{ submission.index + 1 }}">
- <td data-order="{{ submission.index + 1 }}">
- <span class="glyphicon glyphicon-eye-open name-hidden" aria-hidden="true" onclick="toggle_name(true, {{ submission.index + 1 }});"></span>
- <span class="glyphicon glyphicon-eye-close name-shown" aria-hidden="true" onclick="toggle_name(false, {{ submission.index + 1 }});"></span>
- </td>
- <td data-order="{{ submission.index + 1 }}">
- <a href="{{ base_url }}/formgrader/submissions/{{ submission.id }}" class="name-hidden">Submission #{{ submission.index + 1 }}</a>
- <a href="{{ base_url }}/formgrader/submissions/{{ submission.id }}" class="name-shown">{{ submission.last_name }}, {{ submission.first_name }}</a>
- </td>
- <td data-order="{{ submission.score }}" class="center">
- {{ submission.score | float | round(2) }} / {{ submission.max_score | float | round(2) }}
- </td>
- <td data-order="{{ submission.code_score }}" class="center">
- {{ submission.code_score | float | round(2) }} / {{ submission.max_code_score | float | round(2) }}
- </td>
- <td data-order="{{ submission.written_score }}" class="center">
- {{ submission.written_score | float | round(2) }} / {{ submission.max_written_score | float | round(2) }}
- </td>
- {%- if submission.needs_manual_grade -%}
- <td data-search="needs manual grade" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- </td>
- {%- else -%}
- <td data-search="" class="center">
- </td>
- {%- endif -%}
- {%- if submission.failed_tests -%}
- <td data-search="tests failed" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- </td>
- {%- else -%}
- <td data-search="" class="center">
- </td>
- {%- endif -%}
- {%- if submission.flagged -%}
- <td data-search="flagged" class="center">
- <span data-search="flagged" class="glyphicon glyphicon-flag"></span>
- </td>
- {%- else -%}
- <td data-search="" class="center">
- </td>
- {%- endif -%}
- </tr>
- {%- endfor -%}
-</tbody>
-<script type="text/javascript">
-$('span.glyphicon.name-hidden').tooltip({title: "Show student name"});
-$('span.glyphicon.name-shown').tooltip({title: "Hide student name"});
-</script>
-{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/student_assignments.tpl b/nbgrader/server_extensions/formgrader/templates/student_assignments.tpl
deleted file mode 100644
index 81bdfde5..00000000
--- a/nbgrader/server_extensions/formgrader/templates/student_assignments.tpl
+++ /dev/null
@@ -1,64 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block breadcrumb -%}
-<li><a href="{{ base_url }}/formgrader/students">Students</a></li>
-<li class="active">{{ student.id }}</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists the assignments turned in by {{ student.last_name }}, {{ student.first_name }}. Click on a notebook
- name to see the scores for individual notebooks.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th>Assignment ID</th>
- <th class="center">Overall Score</th>
- <th class="center">Code Score</th>
- <th class="center">Written Score</th>
- <th class="center">Needs manual grade?</th>
- </tr>
-</thead>
-<tbody>
- {%- for assignment in assignments -%}
- <tr>
- {%- if assignment.id is none -%}
- <td>{{ assignment.name }} (no submission)</td>
- {%- else -%}
- <td><a href="{{ base_url }}/formgrader/students/{{ student.id }}/{{ assignment.name }}">{{ assignment.name }}</a></td>
- {%- endif -%}
- {%- if assignment.max_score is greaterthan 0 -%}
- <td data-order="{{ assignment.score / assignment.max_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ assignment.score | float | round(2) }} / {{ assignment.max_score | float | round(2) }}
- </td>
- {%- if assignment.max_code_score is greaterthan 0 -%}
- <td data-order="{{ assignment.code_score / assignment.max_code_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ assignment.code_score | float | round(2) }} / {{ assignment.max_code_score | float | round(2) }}
- </td>
- {%- if assignment.max_written_score is greaterthan 0 -%}
- <td data-order="{{ assignment.written_score / assignment.max_written_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ assignment.written_score | float | round(2) }} / {{ assignment.max_written_score | float | round(2) }}
- </td>
- {%- if assignment.needs_manual_grade -%}
- <td data-search="needs manual grade" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- {%- else -%}
- <td data-search="" class="center">
- {%- endif -%}
- </td>
- </tr>
- {%- endfor -%}
-</tbody>
-{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/student_submissions.tpl b/nbgrader/server_extensions/formgrader/templates/student_submissions.tpl
deleted file mode 100644
index 567f288f..00000000
--- a/nbgrader/server_extensions/formgrader/templates/student_submissions.tpl
+++ /dev/null
@@ -1,81 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block breadcrumb -%}
-<li><a href="{{ base_url }}/formgrader/students">Students</a></li>
-<li><a href="{{ base_url }}/formgrader/students/{{ student.id }}">{{ student.id }}</a></li>
-<li class="active">{{ assignment_id }}</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists all the notebooks for the assignment "{{ assignment_id }}" by {{ student.last_name }}, {{ student.first_name }}.
- You can grade a notebook by clicking on its ID.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th>Notebook ID</th>
- <th class="center">Overall Score</th>
- <th class="center">Code Score</th>
- <th class="center">Written Score</th>
- <th class="center">Needs manual grade?</th>
- <th class="center">Tests failed?</th>
- <th class="center">Flagged?</th>
- </tr>
-</thead>
-<tbody>
- {%- for submission in submissions -%}
- <tr>
- <td>
- <a href="{{ base_url }}/formgrader/submissions/{{ submission.id }}">
- {{ submission.name }}
- </a>
- </td>
- {%- if submission.max_score is greaterthan 0 -%}
- <td data-order="{{ submission.score / submission.max_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ submission.score | float | round(2) }} / {{ submission.max_score | float | round(2) }}
- </td>
- {%- if submission.max_code_score is greaterthan 0 -%}
- <td data-order="{{ submission.code_score / submission.max_code_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ submission.code_score | float | round(2) }} / {{ submission.max_code_score | float | round(2) }}
- </td>
- {%- if submission.max_written_score is greaterthan 0 -%}
- <td data-order="{{ submission.written_score / submission.max_written_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ submission.written_score | float | round(2) }} / {{ submission.max_written_score | float | round(2) }}
- </td>
- {%- if submission.needs_manual_grade -%}
- <td data-search="needs manual grade" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- {%- else -%}
- <td data-search="" class="center">
- {%- endif -%}
- </td>
- {%- if submission.failed_tests -%}
- <td data-search="tests failed" class="center">
- <span class="glyphicon glyphicon-ok"></span>
- {%- else -%}
- <td data-search="" class="center">
- {%- endif -%}
- </td>
- {%- if submission.flagged -%}
- <td data-search="flagged" class="center">
- <span class="glyphicon glyphicon-flag"></span>
- {%- else -%}
- <td data-search="" class="center">
- {%- endif -%}
- </td>
- </tr>
- {%- endfor -%}
-</tbody>
-{%- endblock -%}
diff --git a/nbgrader/server_extensions/formgrader/templates/students.tpl b/nbgrader/server_extensions/formgrader/templates/students.tpl
deleted file mode 100644
index 0f28a3c1..00000000
--- a/nbgrader/server_extensions/formgrader/templates/students.tpl
+++ /dev/null
@@ -1,37 +0,0 @@
-{%- extends 'gradebook.tpl' -%}
-
-{%- block breadcrumb -%}
-<li class="active">Students</li>
-{%- endblock -%}
-
-{%- block body -%}
-<div class="panel-body">
- The following table lists all of the students in the class. Click on the name of a student
- to see their grades on individual assignments.
-</div>
-{%- endblock -%}
-
-{%- block table -%}
-<thead>
- <tr>
- <th>Name</th>
- <th class="center">Student ID</th>
- <th class="center">Overall score</th>
- </tr>
-</thead>
-<tbody>
- {%- for student in students -%}
- <tr>
- <td><a href="{{ base_url }}/formgrader/students/{{ student.id }}">{{ student.last_name }}, {{ student.first_name }}</a></td>
- <td class="center">{{ student.id }}
- {%- if student.max_score is greaterthan 0 -%}
- <td data-order="{{ student.score / student.max_score | float | round(2) }}" class="center">
- {%- else -%}
- <td data-order="0.00" class="center">
- {%- endif -%}
- {{ student.score | float | round(2) }} / {{ student.max_score | float | round(2) }}
- </td>
- </tr>
- {%- endfor -%}
-</tbody>
-{%- endblock -%}
diff --git a/nbgrader/utils.py b/nbgrader/utils.py
index 6c632fc4..e03b497d 100644
--- a/nbgrader/utils.py
+++ b/nbgrader/utils.py
@@ -14,6 +14,7 @@ from setuptools.archive_util import unpack_tarfile
from setuptools.archive_util import unpack_zipfile
from contextlib import contextmanager
from tornado.log import LogFormatter
+from dateutil.tz import gettz
# pwd is for unix passwords only, so we shouldn't import it on
@@ -118,6 +119,17 @@ def parse_utc(ts):
return ts
+def as_timezone(ts, timezone):
+ """Converts UTC timestamp ts to have timezone tz."""
+ if not timezone:
+ return ts
+ tz = gettz(timezone)
+ if tz:
+ return (ts + tz.utcoffset(ts)).replace(tzinfo=tz)
+ else:
+ return ts
+
+
def check_mode(path, read=False, write=False, execute=False):
"""Can the current user can rwx the path."""
mode = 0
| Nbgrader as a webapp
Much of the nbgrader complexity for the user could be reduced if it were a webapp for jupyterhub. It could:
- manage the enrolled students (add, delete, edit students)
- manage the assignments (create, edit, release, preview, grade, return)
Currently, this state is spread across nbgrader_config.py, the sqlite database, and the file system (local source, release, autograded, and exchange versions). Initially there wouldn't need to be much javascript; the app could simply be coded as a tornado webapp that gets its auth from jupyterhub.
One could still allow programmatic use (say, direct access to the gradebook), but the webapp could cover 80% or more of what instructors do.
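To make that concrete, here is a minimal sketch (not an actual nbgrader implementation) of what one handler of such a webapp could look like: a tornado handler that lists the enrolled students out of the existing gradebook database and delegates authentication to jupyterhub via its `HubAuthenticated` service mixin. The route, port, and database path below are illustrative assumptions, and the handler is assumed to run as a jupyterhub service with the usual service environment variables set.

```python
# Sketch only: a tornado handler for the proposed webapp that reads the
# enrolled students from the existing sqlite gradebook and relies on
# jupyterhub for authentication.  Route, port, and db path are assumptions.
from tornado import ioloop, web
from jupyterhub.services.auth import HubAuthenticated  # auth comes from jupyterhub
from nbgrader.api import Gradebook


class StudentsHandler(HubAuthenticated, web.RequestHandler):

    @web.authenticated
    def get(self):
        # Read straight from the same sqlite database nbgrader already uses,
        # so the webapp needs no new storage of its own.
        gb = Gradebook("sqlite:///gradebook.db")
        try:
            students = [
                {"id": s.id, "first_name": s.first_name, "last_name": s.last_name}
                for s in gb.students
            ]
        finally:
            gb.close()
        self.write({"students": students})  # tornado serializes the dict to JSON


def main():
    # Expects to be launched as a jupyterhub service, so JUPYTERHUB_API_TOKEN
    # and JUPYTERHUB_API_URL are assumed to be present in the environment.
    app = web.Application([(r"/formgrader/api/students", StudentsHandler)])
    app.listen(9999)  # arbitrary port for the sketch
    ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
```

The same pattern would extend to assignment management (create, release, collect) by wrapping the corresponding nbgrader operations behind further handlers.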
| jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_api.py b/nbgrader/tests/apps/test_api.py
index 14e91743..75c748e4 100644
--- a/nbgrader/tests/apps/test_api.py
+++ b/nbgrader/tests/apps/test_api.py
@@ -8,7 +8,7 @@ from datetime import datetime
from ...apps.api import NbGraderAPI
from ...coursedir import CourseDirectory
-from ...utils import rmtree, get_username
+from ...utils import rmtree, get_username, parse_utc
from .. import run_nbgrader
from .base import BaseTestApp
from .conftest import notwindows, windows
@@ -30,6 +30,11 @@ def api(request, course_dir, db, exchange):
class TestNbGraderAPI(BaseTestApp):
+ if sys.platform == 'win32':
+ tz = "Coordinated Universal Time"
+ else:
+ tz = "UTC"
+
def test_get_source_assignments(self, api, course_dir):
assert api.get_source_assignments() == set([])
@@ -120,13 +125,17 @@ class TestNbGraderAPI(BaseTestApp):
'average_code_score', 'average_score', 'average_written_score',
'duedate', 'name', 'num_submissions', 'release_path', 'releaseable',
'source_path', 'status', 'id', 'max_code_score', 'max_score',
- 'max_written_score'])
+ 'max_written_score', 'display_duedate', 'duedate_timezone',
+ 'duedate_notimezone'])
default = {
"average_code_score": 0,
"average_score": 0,
"average_written_score": 0,
"duedate": None,
+ "display_duedate": None,
+ "duedate_timezone": "UTC",
+ "duedate_notimezone": None,
"name": "ps1",
"num_submissions": 0,
"release_path": None,
@@ -172,6 +181,21 @@ class TestNbGraderAPI(BaseTestApp):
target["max_written_score"] = 1
assert a == target
+ # check that timestamps are handled correctly
+ with api.gradebook as gb:
+ assignment = gb.find_assignment("ps1")
+ assignment.duedate = parse_utc("2017-07-05 12:22:08 UTC")
+ gb.db.commit()
+
+ a = api.get_assignment("ps1")
+ default["duedate"] = "2017-07-05T12:22:08"
+ default["display_duedate"] = "2017-07-05 12:22:08 {}".format(self.tz)
+ default["duedate_notimezone"] = "2017-07-05T12:22:08"
+ assert a["duedate"] == default["duedate"]
+ assert a["display_duedate"] == default["display_duedate"]
+ assert a["duedate_notimezone"] == default["duedate_notimezone"]
+ assert a["duedate_timezone"] == default["duedate_timezone"]
+
# check the values once the assignment has been released and unreleased
if sys.platform != "win32":
run_nbgrader(["release", "ps1", "--course", "abc101", "--Exchange.root={}".format(exchange)])
@@ -251,8 +275,15 @@ class TestNbGraderAPI(BaseTestApp):
assert set(n1.keys()) == keys
assert n1 == default.copy()
+ # add it to the database (but don't assign yet)
+ with api.gradebook as gb:
+ gb.update_or_create_assignment("ps1")
+ n1, = api.get_notebooks("ps1")
+ assert set(n1.keys()) == keys
+ assert n1 == default.copy()
+
# check values after nbgrader assign is run
- run_nbgrader(["assign", "ps1", "--create", "--db", db])
+ run_nbgrader(["assign", "ps1", "--create", "--db", db, "--force"])
n1, = api.get_notebooks("ps1")
assert set(n1.keys()) == keys
target = default.copy()
@@ -267,7 +298,7 @@ class TestNbGraderAPI(BaseTestApp):
"id", "name", "student", "last_name", "first_name", "score",
"max_score", "code_score", "max_code_score", "written_score",
"max_written_score", "needs_manual_grade", "autograded",
- "timestamp", "submitted"])
+ "timestamp", "submitted", "display_timestamp"])
default = {
"id": None,
@@ -284,6 +315,7 @@ class TestNbGraderAPI(BaseTestApp):
"needs_manual_grade": False,
"autograded": False,
"timestamp": None,
+ "display_timestamp": None,
"submitted": False
}
@@ -294,13 +326,13 @@ class TestNbGraderAPI(BaseTestApp):
run_nbgrader(["assign", "ps1", "--create", "--db", db])
self._copy_file(join("files", "submitted-changed.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
- timestamp = datetime.now()
- self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents=timestamp.isoformat())
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), contents="2017-07-05T12:32:56.123456")
s = api.get_submission("ps1", "foo")
assert set(s.keys()) == keys
target = default.copy()
target["submitted"] = True
- target["timestamp"] = timestamp.isoformat()
+ target["timestamp"] = "2017-07-05T12:32:56.123456"
+ target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
assert s == target
run_nbgrader(["autograde", "ps1", "--create", "--no-execute", "--force", "--db", db])
@@ -309,7 +341,8 @@ class TestNbGraderAPI(BaseTestApp):
target["id"] = s["id"]
target["autograded"] = True
target["submitted"] = True
- target["timestamp"] = timestamp.isoformat()
+ target["timestamp"] = "2017-07-05T12:32:56.123456"
+ target["display_timestamp"] = "2017-07-05 12:32:56 {}".format(self.tz)
target["code_score"] = 2
target["max_code_score"] = 5
target["score"] = 2
@@ -368,6 +401,8 @@ class TestNbGraderAPI(BaseTestApp):
assert idx[notebooks[1].id] == 1
def test_get_notebook_submissions(self, api, course_dir, db):
+ assert api.get_notebook_submissions("ps1", "p1") == []
+
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
run_nbgrader(["assign", "ps1", "--create", "--db", db])
diff --git a/nbgrader/tests/nbextensions/conftest.py b/nbgrader/tests/nbextensions/conftest.py
index e762c7ad..03fb3523 100644
--- a/nbgrader/tests/nbextensions/conftest.py
+++ b/nbgrader/tests/nbextensions/conftest.py
@@ -92,8 +92,7 @@ def port():
return nbserver_port
[email protected](scope="module")
-def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
+def _make_nbserver(course_id, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
env = os.environ.copy()
env['JUPYTER_CONFIG_DIR'] = jupyter_config_dir
env['JUPYTER_DATA_DIR'] = jupyter_data_dir
@@ -123,14 +122,15 @@ def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, excha
"""
c.Exchange.root = "{}"
c.Exchange.cache = "{}"
- """.format(exchange, cache)
+ c.Exchange.course_id = "{}"
+ """.format(exchange, cache, course_id)
))
kwargs = dict(env=env)
if sys.platform == 'win32':
kwargs['creationflags'] = sp.CREATE_NEW_PROCESS_GROUP
- nbserver = sp.Popen([
+ server = sp.Popen([
sys.executable, "-m", "jupyter", "notebook",
"--no-browser",
"--NotebookApp.token=''", # Notebook >=4.3
@@ -140,35 +140,33 @@ def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, excha
# wait for a few seconds to allow the notebook server to finish starting
time.sleep(5)
- def fin():
- if sys.platform == 'win32':
- nbserver.send_signal(signal.CTRL_BREAK_EVENT)
- else:
- nbserver.terminate()
+ return server
- for i in range(10):
- retcode = nbserver.poll()
- if retcode is not None:
- break
- time.sleep(0.1)
- if retcode is None:
- print("couldn't shutdown notebook server, force killing it")
- nbserver.kill()
+def _close_nbserver(server):
+ if sys.platform == 'win32':
+ server.send_signal(signal.CTRL_BREAK_EVENT)
+ else:
+ server.terminate()
- nbserver.wait()
- copy_coverage_files()
+ for _ in range(10):
+ retcode = server.poll()
+ if retcode is not None:
+ break
+ time.sleep(0.1)
- # wait a short period of time for kernels to finish shutting down
- time.sleep(1)
+ if retcode is None:
+ print("couldn't shutdown notebook server, force killing it")
+ server.kill()
- request.addfinalizer(fin)
+ server.wait()
+ copy_coverage_files()
- return nbserver
+ # wait a short period of time for kernels to finish shutting down
+ time.sleep(1)
[email protected]
-def browser(request, tempdir, nbserver):
+def _make_browser(tempdir):
for filename in glob.glob(os.path.join(os.path.dirname(__file__), "files", "*.ipynb")):
shutil.copy(filename, os.path.join(tempdir, os.path.basename(filename)))
@@ -184,21 +182,21 @@ def browser(request, tempdir, nbserver):
browser.set_page_load_timeout(30)
browser.set_script_timeout(30)
- def fin():
- console_messages = browser.get_log('browser')
- if len(console_messages) > 0:
- print("\n<-- CAPTURED JAVASCRIPT CONSOLE MESSAGES -->")
- for message in console_messages:
- print(message)
- print("<------------------------------------------>")
- browser.save_screenshot(os.path.join(os.path.dirname(__file__), 'selenium.screenshot.png'))
- browser.service.process.send_signal(signal.SIGTERM)
- browser.quit()
-
- request.addfinalizer(fin)
return browser
+def _close_browser(browser):
+ console_messages = browser.get_log('browser')
+ if len(console_messages) > 0:
+ print("\n<-- CAPTURED JAVASCRIPT CONSOLE MESSAGES -->")
+ for message in console_messages:
+ print(message)
+ print("<------------------------------------------>")
+ browser.save_screenshot(os.path.join(os.path.dirname(__file__), 'selenium.screenshot.png'))
+ browser.service.process.send_signal(signal.SIGTERM)
+ browser.quit()
+
+
notwindows = pytest.mark.skipif(
sys.platform == 'win32',
reason="Assignment List extension is not available on windows"
diff --git a/nbgrader/tests/nbextensions/formgrade_utils.py b/nbgrader/tests/nbextensions/formgrade_utils.py
index f25401f2..253994b0 100644
--- a/nbgrader/tests/nbextensions/formgrade_utils.py
+++ b/nbgrader/tests/nbextensions/formgrade_utils.py
@@ -3,7 +3,7 @@ from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
-from selenium.common.exceptions import TimeoutException
+from selenium.common.exceptions import TimeoutException, NoSuchElementException
@@ -15,6 +15,10 @@ def _notebook_url(port, url=""):
return urljoin("http://localhost:{}/notebooks/".format(port), url).rstrip("/")
+def _tree_url(port, url=""):
+ return urljoin("http://localhost:{}/tree/".format(port), url).rstrip("/")
+
+
def _check_url(browser, port, url):
if not url.startswith("http"):
url = _formgrade_url(port, url)
@@ -23,11 +27,11 @@ def _check_url(browser, port, url):
def _check_breadcrumbs(browser, *breadcrumbs):
# check that breadcrumbs are correct
- elements = browser.find_elements_by_css_selector("ul.breadcrumb li")
+ elements = browser.find_elements_by_css_selector(".breadcrumb li")
assert tuple([e.text for e in elements]) == breadcrumbs
# check that the active breadcrumb is correct
- element = browser.find_element_by_css_selector("ul.breadcrumb li.active")
+ element = browser.find_element_by_css_selector(".breadcrumb li.active")
assert element.text == breadcrumbs[-1]
@@ -52,13 +56,16 @@ def _wait_for_visibility_of_element(browser, element_id, time=10):
def _wait_for_gradebook_page(browser, port, url):
- _wait_for_element(browser, "gradebook")
+ page_loaded = lambda browser: browser.execute_script(
+ """return typeof models !== "undefined" && models !== undefined && models.loaded === true;""")
+ WebDriverWait(browser, 10).until(page_loaded)
_check_url(browser, port, url)
def _get(browser, url, retries=5):
try:
browser.get(url)
+ assert browser.get_cookies()
except TimeoutException:
if retries == 0:
raise
@@ -72,6 +79,11 @@ def _load_gradebook_page(browser, port, url):
_wait_for_gradebook_page(browser, port, url)
+def _wait_for_tree_page(browser, port, url):
+ _wait_for_element(browser, "ipython-main-app")
+ _check_url(browser, port, url)
+
+
def _wait_for_notebook_page(browser, port, url):
_wait_for_element(browser, "notebook-container")
_check_url(browser, port, url)
@@ -181,6 +193,15 @@ def _load_formgrade(browser, port, gradebook):
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
- _load_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ _load_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
_click_link(browser, "Submission #1")
_wait_for_formgrader(browser, port, "submissions/{}/?index=0".format(submissions[0].id))
+
+
+def _child_exists(elem, selector):
+ try:
+ elem.find_element_by_css_selector(selector)
+ except NoSuchElementException:
+ return False
+ else:
+ return True
diff --git a/nbgrader/tests/nbextensions/test_assignment_list.py b/nbgrader/tests/nbextensions/test_assignment_list.py
index 46aa89b1..6840656b 100644
--- a/nbgrader/tests/nbextensions/test_assignment_list.py
+++ b/nbgrader/tests/nbextensions/test_assignment_list.py
@@ -13,10 +13,32 @@ from nbformat.v4 import new_notebook
from textwrap import dedent
from .. import run_nbgrader
-from .conftest import notwindows
+from .conftest import notwindows, _make_nbserver, _make_browser, _close_nbserver, _close_browser
from ...utils import rmtree
[email protected](scope="module")
+def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
+ server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
+
+ def fin():
+ _close_nbserver(server)
+ request.addfinalizer(fin)
+
+ return server
+
+
[email protected]
+def browser(request, tempdir, nbserver):
+ browser = _make_browser(tempdir)
+
+ def fin():
+ _close_browser(browser)
+ request.addfinalizer(fin)
+
+ return browser
+
+
@pytest.fixture(scope="module")
def class_files(coursedir):
# copy files from the user guide
diff --git a/nbgrader/tests/nbextensions/test_create_assignment.py b/nbgrader/tests/nbextensions/test_create_assignment.py
index fe4fc2da..da9bbc34 100644
--- a/nbgrader/tests/nbextensions/test_create_assignment.py
+++ b/nbgrader/tests/nbextensions/test_create_assignment.py
@@ -9,9 +9,32 @@ from selenium.common.exceptions import TimeoutException, NoSuchElementException
from textwrap import dedent
from ...nbgraderformat import read
+from .conftest import _make_nbserver, _make_browser, _close_nbserver, _close_browser
from nbformat import current_nbformat
[email protected](scope="module")
+def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
+ server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
+
+ def fin():
+ _close_nbserver(server)
+ request.addfinalizer(fin)
+
+ return server
+
+
[email protected]
+def browser(request, tempdir, nbserver):
+ browser = _make_browser(tempdir)
+
+ def fin():
+ _close_browser(browser)
+ request.addfinalizer(fin)
+
+ return browser
+
+
def _wait(browser):
return WebDriverWait(browser, 30)
diff --git a/nbgrader/tests/nbextensions/test_formgrader.py b/nbgrader/tests/nbextensions/test_formgrader.py
index eeeec21a..6f5d4feb 100644
--- a/nbgrader/tests/nbextensions/test_formgrader.py
+++ b/nbgrader/tests/nbextensions/test_formgrader.py
@@ -1,6 +1,10 @@
import pytest
import os
import shutil
+import sys
+import glob
+
+from os.path import join
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
@@ -10,40 +14,74 @@ from selenium.webdriver.common.by import By
from .. import run_nbgrader
from ...api import Gradebook, MissingEntry
from . import formgrade_utils as utils
+from .conftest import notwindows, _make_nbserver, _make_browser, _close_nbserver, _close_browser
+from ...utils import rmtree
+
+
+if sys.platform == 'win32':
+ tz = "Coordinated Universal Time"
+else:
+ tz = "UTC"
+
+
[email protected](scope="module")
+def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
+ server = _make_nbserver("course101", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
+
+ def fin():
+ _close_nbserver(server)
+ request.addfinalizer(fin)
+
+ return server
+
+
[email protected]
+def browser(request, tempdir, nbserver):
+ browser = _make_browser(tempdir)
+
+ def fin():
+ _close_browser(browser)
+ request.addfinalizer(fin)
+
+ return browser
@pytest.fixture(scope="module")
def gradebook(request, tempdir, nbserver):
# copy files from the user guide
- source_path = os.path.join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "source")
- submitted_path = os.path.join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "submitted")
+ source_path = join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "source")
+ submitted_path = join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "submitted")
- shutil.copytree(os.path.join(os.path.dirname(__file__), source_path), os.path.join("source"))
- shutil.copytree(os.path.join(os.path.dirname(__file__), submitted_path), os.path.join("submitted"))
+ shutil.copytree(source_path, "source")
+ for student in ["bitdiddle", "hacker"]:
+ shutil.copytree(join(submitted_path, student), join("submitted", student))
# rename to old names -- we do this rather than changing all the tests
# because I want the tests to operate on files with spaces in the names
- os.rename(os.path.join("source", "ps1"), os.path.join("source", "Problem Set 1"))
- os.rename(os.path.join("source", "Problem Set 1", "problem1.ipynb"), os.path.join("source", "Problem Set 1", "Problem 1.ipynb"))
- os.rename(os.path.join("source", "Problem Set 1", "problem2.ipynb"), os.path.join("source", "Problem Set 1", "Problem 2.ipynb"))
- os.rename(os.path.join("submitted", "bitdiddle"), os.path.join("submitted", "Bitdiddle"))
- os.rename(os.path.join("submitted", "Bitdiddle", "ps1"), os.path.join("submitted", "Bitdiddle", "Problem Set 1"))
- os.rename(os.path.join("submitted", "Bitdiddle", "Problem Set 1", "problem1.ipynb"), os.path.join("submitted", "Bitdiddle", "Problem Set 1", "Problem 1.ipynb"))
- os.rename(os.path.join("submitted", "Bitdiddle", "Problem Set 1", "problem2.ipynb"), os.path.join("submitted", "Bitdiddle", "Problem Set 1", "Problem 2.ipynb"))
- os.rename(os.path.join("submitted", "hacker"), os.path.join("submitted", "Hacker"))
- os.rename(os.path.join("submitted", "Hacker", "ps1"), os.path.join("submitted", "Hacker", "Problem Set 1"))
- os.rename(os.path.join("submitted", "Hacker", "Problem Set 1", "problem1.ipynb"), os.path.join("submitted", "Hacker", "Problem Set 1", "Problem 1.ipynb"))
- os.rename(os.path.join("submitted", "Hacker", "Problem Set 1", "problem2.ipynb"), os.path.join("submitted", "Hacker", "Problem Set 1", "Problem 2.ipynb"))
+ os.rename(join("source", "ps1"), join("source", "Problem Set 1"))
+ os.rename(join("source", "Problem Set 1", "problem1.ipynb"), join("source", "Problem Set 1", "Problem 1.ipynb"))
+ os.rename(join("source", "Problem Set 1", "problem2.ipynb"), join("source", "Problem Set 1", "Problem 2.ipynb"))
+ os.rename(join("submitted", "bitdiddle"), join("submitted", "Bitdiddle"))
+ os.rename(join("submitted", "Bitdiddle", "ps1"), join("submitted", "Bitdiddle", "Problem Set 1"))
+ os.rename(join("submitted", "Bitdiddle", "Problem Set 1", "problem1.ipynb"), join("submitted", "Bitdiddle", "Problem Set 1", "Problem 1.ipynb"))
+ os.rename(join("submitted", "Bitdiddle", "Problem Set 1", "problem2.ipynb"), join("submitted", "Bitdiddle", "Problem Set 1", "Problem 2.ipynb"))
+ os.rename(join("submitted", "hacker"), join("submitted", "Hacker"))
+ os.rename(join("submitted", "Hacker", "ps1"), join("submitted", "Hacker", "Problem Set 1"))
+ os.rename(join("submitted", "Hacker", "Problem Set 1", "problem1.ipynb"), join("submitted", "Hacker", "Problem Set 1", "Problem 1.ipynb"))
+ os.rename(join("submitted", "Hacker", "Problem Set 1", "problem2.ipynb"), join("submitted", "Hacker", "Problem Set 1", "Problem 2.ipynb"))
# run nbgrader assign
run_nbgrader([
"assign", "Problem Set 1",
- "--IncludeHeaderFooter.header={}".format(os.path.join("source", "header.ipynb"))
+ "--IncludeHeaderFooter.header={}".format(join("source", "header.ipynb"))
])
# run the autograder
run_nbgrader(["autograde", "Problem Set 1"])
+ # make sure louis is in the database (won't get added because he hasn't submitted anything!)
+ run_nbgrader(["db", "student", "add", "Reasoner", "--first-name", "Louis", "--last-name", "R"])
+
gb = Gradebook("sqlite:///gradebook.db")
def fin():
@@ -54,52 +92,100 @@ def gradebook(request, tempdir, nbserver):
@pytest.mark.nbextensions
-def test_load_assignment_list(browser, port, gradebook):
+def test_load_manage_assignments(browser, port, gradebook):
# load the main page and make sure it is the Assignments page
utils._get(browser, utils._formgrade_url(port))
utils._wait_for_gradebook_page(browser, port, "")
utils._check_breadcrumbs(browser, "Assignments")
- # load the assignments page
- utils._load_gradebook_page(browser, port, "assignments")
- utils._check_breadcrumbs(browser, "Assignments")
-
# click on the "Problem Set 1" link
utils._click_link(browser, "Problem Set 1")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1")
+ browser.switch_to_window(browser.window_handles[1])
+ utils._wait_for_tree_page(
+ browser, port,
+ utils._tree_url(port, "source/Problem Set 1"))
+ browser.close()
+ browser.switch_to_window(browser.window_handles[0])
+
+ # click on the preview link
+ browser.find_element_by_css_selector("td.preview .glyphicon").click()
+ browser.switch_to_window(browser.window_handles[1])
+ utils._wait_for_tree_page(
+ browser, port,
+ utils._tree_url(port, "release/Problem Set 1"))
+ browser.close()
+ browser.switch_to_window(browser.window_handles[0])
+
+ # click on the number of submissions
+ browser.find_element_by_css_selector("td.num-submissions a").click()
+ utils._wait_for_gradebook_page(browser, port, "manage_submissions/Problem Set 1")
@pytest.mark.nbextensions
-def test_load_assignment_notebook_list(browser, port, gradebook):
- utils._load_gradebook_page(browser, port, "assignments/Problem Set 1")
+def test_load_manage_submissions(browser, port, gradebook):
+ # load the submissions page
+ utils._load_gradebook_page(browser, port, "manage_submissions/Problem Set 1")
utils._check_breadcrumbs(browser, "Assignments", "Problem Set 1")
- # click the "Assignments" link
+ # click on the "Assignments" link
utils._click_link(browser, "Assignments")
- utils._wait_for_gradebook_page(browser, port, "assignments")
+ utils._wait_for_gradebook_page(browser, port, "manage_assignments")
+ browser.back()
+
+ # click on students
+ for student in gradebook.students:
+ try:
+ gradebook.find_submission("Problem Set 1", student.id)
+ except MissingEntry:
+ continue
+
+ utils._click_link(browser, "{}, {}".format(student.last_name, student.first_name))
+ utils._wait_for_gradebook_page(browser, port, "manage_students/{}/Problem Set 1".format(student.id))
+ browser.back()
+
+
[email protected]
+def test_load_gradebook1(browser, port, gradebook):
+ # load the assignments page
+ utils._load_gradebook_page(browser, port, "gradebook")
+ utils._check_breadcrumbs(browser, "Manual Grading")
+
+ # click on the "Problem Set 1" link
+ utils._click_link(browser, "Problem Set 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1")
+
+
[email protected]
+def test_load_gradebook2(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "gradebook/Problem Set 1")
+ utils._check_breadcrumbs(browser, "Manual Grading", "Problem Set 1")
+
+ # click the "Manual Grading" link
+ utils._click_link(browser, "Manual Grading")
+ utils._wait_for_gradebook_page(browser, port, "gradebook")
browser.back()
# click on the problem link
for problem in gradebook.find_assignment("Problem Set 1").notebooks:
utils._click_link(browser, problem.name)
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/{}".format(problem.name))
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/{}".format(problem.name))
browser.back()
@pytest.mark.nbextensions
-def test_load_assignment_notebook_submissions_list(browser, port, gradebook):
+def test_load_gradebook3(browser, port, gradebook):
for problem in gradebook.find_assignment("Problem Set 1").notebooks:
- utils._load_gradebook_page(browser, port, "assignments/Problem Set 1/{}".format(problem.name))
- utils._check_breadcrumbs(browser, "Assignments", "Problem Set 1", problem.name)
+ utils._load_gradebook_page(browser, port, "gradebook/Problem Set 1/{}".format(problem.name))
+ utils._check_breadcrumbs(browser, "Manual Grading", "Problem Set 1", problem.name)
- # click the "Assignments" link
- utils._click_link(browser, "Assignments")
- utils._wait_for_gradebook_page(browser, port, "assignments")
+ # click the "Manual Grading" link
+ utils._click_link(browser, "Manual Grading")
+ utils._wait_for_gradebook_page(browser, port, "gradebook")
browser.back()
# click the "Problem Set 1" link
utils._click_link(browser, "Problem Set 1")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1")
browser.back()
submissions = problem.submissions
@@ -112,14 +198,14 @@ def test_load_assignment_notebook_submissions_list(browser, port, gradebook):
@pytest.mark.nbextensions
-def test_assignment_notebook_submissions_show_hide_names(browser, port, gradebook):
+def test_gradebook3_show_hide_names(browser, port, gradebook):
problem = gradebook.find_assignment("Problem Set 1").notebooks[0]
- utils._load_gradebook_page(browser, port, "assignments/Problem Set 1/{}".format(problem.name))
+ utils._load_gradebook_page(browser, port, "gradebook/Problem Set 1/{}".format(problem.name))
submissions = problem.submissions
submissions.sort(key=lambda x: x.id)
submission = submissions[0]
- top_elem = browser.find_element_by_css_selector("#submission-1")
+ top_elem = browser.find_elements_by_css_selector("tbody tr")[0]
col1, col2 = top_elem.find_elements_by_css_selector("td")[:2]
hidden = col1.find_element_by_css_selector(".glyphicon.name-hidden")
shown = col1.find_element_by_css_selector(".glyphicon.name-shown")
@@ -147,47 +233,41 @@ def test_assignment_notebook_submissions_show_hide_names(browser, port, gradeboo
@pytest.mark.nbextensions
-def test_load_student_list(browser, port, gradebook):
+def test_load_student1(browser, port, gradebook):
# load the student view
- utils._load_gradebook_page(browser, port, "students")
+ utils._load_gradebook_page(browser, port, "manage_students")
utils._check_breadcrumbs(browser, "Students")
# click on student
for student in gradebook.students:
- ## TODO: they should have a link here, even if they haven't submitted anything!
- if len(student.submissions) == 0:
- continue
utils._click_link(browser, "{}, {}".format(student.last_name, student.first_name))
- utils._wait_for_gradebook_page(browser, port, "students/{}".format(student.id))
+ utils._wait_for_gradebook_page(browser, port, "manage_students/{}".format(student.id))
browser.back()
@pytest.mark.nbextensions
-def test_load_student_assignment_list(browser, port, gradebook):
+def test_load_student2(browser, port, gradebook):
for student in gradebook.students:
- utils._load_gradebook_page(browser, port, "students/{}".format(student.id))
+ utils._load_gradebook_page(browser, port, "manage_students/{}".format(student.id))
utils._check_breadcrumbs(browser, "Students", student.id)
-
try:
- gradebook.find_submission("Problem Set 1", student.id)
+ submission = gradebook.find_submission("Problem Set 1", student.id)
except MissingEntry:
- ## TODO: make sure link doesn't exist
continue
utils._click_link(browser, "Problem Set 1")
- utils._wait_for_gradebook_page(browser, port, "students/{}/Problem Set 1".format(student.id))
+ utils._wait_for_gradebook_page(browser, port, "manage_students/{}/Problem Set 1".format(student.id))
@pytest.mark.nbextensions
-def test_load_student_assignment_submissions_list(browser, port, gradebook):
+def test_load_student3(browser, port, gradebook):
for student in gradebook.students:
try:
submission = gradebook.find_submission("Problem Set 1", student.id)
except MissingEntry:
- ## TODO: make sure link doesn't exist
continue
- utils._load_gradebook_page(browser, port, "students/{}/Problem Set 1".format(student.id))
+ utils._load_gradebook_page(browser, port, "manage_students/{}/Problem Set 1".format(student.id))
utils._check_breadcrumbs(browser, "Students", student.id, "Problem Set 1")
for problem in gradebook.find_assignment("Problem Set 1").notebooks:
@@ -195,27 +275,24 @@ def test_load_student_assignment_submissions_list(browser, port, gradebook):
utils._click_link(browser, problem.name)
utils._wait_for_formgrader(browser, port, "submissions/{}/?index=0".format(submission.id))
browser.back()
- utils._wait_for_gradebook_page(browser, port, "students/{}/Problem Set 1".format(student.id))
+ utils._wait_for_gradebook_page(browser, port, "manage_students/{}/Problem Set 1".format(student.id))
@pytest.mark.nbextensions
def test_switch_views(browser, port, gradebook):
- # load the main page
- utils._load_gradebook_page(browser, port, "assignments")
-
- # click the "Change View" button
- utils._click_link(browser, "Change View", partial=True)
-
- # click the "Students" option
- utils._click_link(browser, "Students")
- utils._wait_for_gradebook_page(browser, port, "students")
-
- # click the "Change View" button
- utils._click_link(browser, "Change View", partial=True)
+ pages = ["", "manage_assignments", "gradebook", "manage_students"]
+ links = [
+ ("Manage Assignments", "manage_assignments"),
+ ("Manual Grading", "gradebook"),
+ ("Manage Students", "manage_students")
+ ]
- # click the "Assignments" option
- utils._click_link(browser, "Assignments")
- utils._wait_for_gradebook_page(browser, port, "assignments")
+ for page in pages:
+ utils._load_gradebook_page(browser, port, page)
+ for link, target in links:
+ utils._click_link(browser, link)
+ utils._wait_for_gradebook_page(browser, port, target)
+ browser.back()
@pytest.mark.nbextensions
@@ -228,9 +305,9 @@ def test_formgrade_view_breadcrumbs(browser, port, gradebook):
utils._get(browser, utils._formgrade_url(port, "submissions/{}".format(submission.id)))
utils._wait_for_formgrader(browser, port, "submissions/{}/?index=0".format(submission.id))
- # click on the "Assignments" link
- utils._click_link(browser, "Assignments")
- utils._wait_for_gradebook_page(browser, port, "assignments")
+ # click on the "Manual Grading" link
+ utils._click_link(browser, "Manual Grading")
+ utils._wait_for_gradebook_page(browser, port, "gradebook")
# go back
browser.back()
@@ -238,7 +315,7 @@ def test_formgrade_view_breadcrumbs(browser, port, gradebook):
# click on the "Problem Set 1" link
utils._click_link(browser, "Problem Set 1")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1")
# go back
browser.back()
@@ -246,7 +323,7 @@ def test_formgrade_view_breadcrumbs(browser, port, gradebook):
# click on the problem link
utils._click_link(browser, problem.name)
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/{}".format(problem.name))
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/{}".format(problem.name))
# go back
browser.back()
@@ -323,7 +400,7 @@ def test_next_prev_assignments(browser, port, gradebook):
# Move to the next submission (should return to notebook list)
next_function()
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -335,7 +412,7 @@ def test_next_prev_assignments(browser, port, gradebook):
# Move to the previous submission (should return to the notebook list)
prev_function()
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
@pytest.mark.nbextensions
@@ -359,7 +436,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
if submissions[0].failed_tests:
# Go to the next failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ".")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -367,7 +444,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
# Go to the previous failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ",")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -379,7 +456,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
# Go to the next failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ".")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -400,7 +477,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
# Go to the previous failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ",")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -412,7 +489,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
# Go to the next failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ".")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
# Go back
browser.back()
@@ -420,7 +497,7 @@ def test_next_prev_failed_assignments(browser, port, gradebook):
# Go to the previous failed submission (should return to the notebook list)
utils._send_keys_to_body(browser, Keys.CONTROL, Keys.SHIFT, ",")
- utils._wait_for_gradebook_page(browser, port, "assignments/Problem Set 1/Problem 1")
+ utils._wait_for_gradebook_page(browser, port, "gradebook/Problem Set 1/Problem 1")
@pytest.mark.nbextensions
@@ -727,3 +804,487 @@ def test_formgrade_show_hide_names(browser, port, gradebook):
assert name.text == "Submission #1"
assert not shown.is_displayed()
assert hidden.is_displayed()
+
+
[email protected]
+def test_add_new_assignment(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # click the "add new assignment" button
+ utils._click_link(browser, "Add new assignment...")
+ utils._wait_for_element(browser, "add-assignment-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#add-assignment-modal button.btn-primary")))
+
+ # set the name, duedate, and timezone
+ elem = browser.find_element_by_css_selector("#add-assignment-modal .name")
+ elem.click()
+ elem.send_keys("ps2")
+ elem = browser.find_element_by_css_selector("#add-assignment-modal .duedate")
+ elem.click()
+ elem.send_keys("2017-07-05T17:00")
+ elem = browser.find_element_by_css_selector("#add-assignment-modal .timezone")
+ elem.click()
+ elem.send_keys("UTC")
+
+ # click save and wait for the modal to close
+ utils._click_element(browser, "#add-assignment-modal .save")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#add-assignment-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # wait until both rows are present
+ rows_present = lambda browser: len(browser.find_elements_by_css_selector("tbody tr")) == 2
+ WebDriverWait(browser, 10).until(rows_present)
+
+ # check that the new row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 17:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert not utils._child_exists(row, ".preview a")
+ assert not utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 17:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert not utils._child_exists(row, ".preview a")
+ assert not utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+
[email protected]
+def test_edit_assignment(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # click on the edit button
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".edit a").click()
+ utils._wait_for_element(browser, "edit-assignment-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#edit-assignment-modal button.btn-primary")))
+
+ # modify the duedate
+ elem = browser.find_element_by_css_selector("#edit-assignment-modal .modal-duedate")
+ elem.clear()
+ elem.click()
+ elem.send_keys("2017-07-05T18:00")
+
+ # click save and wait for the modal to close
+ utils._click_element(browser, "#edit-assignment-modal .save")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#edit-assignment-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert not utils._child_exists(row, ".preview a")
+ assert not utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert not utils._child_exists(row, ".preview a")
+ assert not utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+
[email protected]
+def test_generate_assignment_fail(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # click on the generate button -- should produce an error because there
+ # are no notebooks for ps2 yet
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".assign a").click()
+ utils._wait_for_element(browser, "error-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#error-modal button.btn-primary")))
+ utils._click_element(browser, "#error-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#error-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+
[email protected]
+def test_generate_assignment_success(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # add a notebook for ps2
+ source_path = join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "source", "ps1", "problem1.ipynb")
+ shutil.copy(source_path, join("source", "ps2", "Problem 1.ipynb"))
+
+ # click on the generate button -- should now succeed
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".assign a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ if sys.platform == 'win32':
+ assert not utils._child_exists(row, ".release a")
+ else:
+ assert utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ if sys.platform == 'win32':
+ assert not utils._child_exists(row, ".release a")
+ else:
+ assert utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+
+@notwindows
[email protected]
+def test_release_assignment(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # click on the release button
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".release a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "released"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "released"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "0"
+
+
+@notwindows
[email protected]
+def test_collect_assignment(browser, port, gradebook):
+ run_nbgrader(["fetch", "ps2"])
+ run_nbgrader(["submit", "ps2"])
+ rmtree("ps2")
+
+ utils._load_gradebook_page(browser, port, "")
+
+ # click on the collect button
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".collect a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "released"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "1"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "released"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "1"
+
+
+@notwindows
[email protected]
+def test_unrelease_assignment(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "")
+
+ # click on the unrelease button
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ row.find_element_by_css_selector(".release a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "1"
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "")
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ assert utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "1"
+
+
[email protected]
+def test_manually_collect_assignment(browser, port, gradebook):
+ existing_submissions = glob.glob(join("submitted", "*", "ps2"))
+ for dirname in existing_submissions:
+ rmtree(dirname)
+ dest = join("submitted", "Bitdiddle", "ps2")
+ if not os.path.exists(os.path.dirname(dest)):
+ os.makedirs(os.path.dirname(dest))
+ shutil.copytree(join("release", "ps2"), dest)
+ with open(join(dest, "timestamp.txt"), "w") as fh:
+ fh.write("2017-07-05 18:05:21 UTC")
+
+ utils._load_gradebook_page(browser, port, "")
+
+ # check that the row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[1]
+ assert row.find_element_by_css_selector(".name").text == "ps2"
+ assert row.find_element_by_css_selector(".duedate").text == "2017-07-05 18:00:00 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "draft"
+ assert utils._child_exists(row, ".edit a")
+ assert utils._child_exists(row, ".assign a")
+ assert utils._child_exists(row, ".preview a")
+ if sys.platform == 'win32':
+ assert not utils._child_exists(row, ".release a")
+ else:
+ assert utils._child_exists(row, ".release a")
+ assert not utils._child_exists(row, ".collect a")
+ assert row.find_element_by_css_selector(".num-submissions").text == "1"
+
+
[email protected]
+def test_autograde_assignment(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "manage_submissions/ps2")
+
+ # check the contents of the row before grading
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".student-name").text == "B, Ben"
+ assert row.find_element_by_css_selector(".student-id").text == "Bitdiddle"
+ assert row.find_element_by_css_selector(".timestamp").text == "2017-07-05 18:05:21 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "needs autograding"
+ assert row.find_element_by_css_selector(".score").text == ""
+ assert utils._child_exists(row, ".autograde a")
+
+ # click on the autograde button
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ row.find_element_by_css_selector(".autograde a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".student-name").text == "B, Ben"
+ assert row.find_element_by_css_selector(".student-id").text == "Bitdiddle"
+ assert row.find_element_by_css_selector(".timestamp").text == "2017-07-05 18:05:21 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "graded"
+ assert row.find_element_by_css_selector(".score").text == "0 / 6"
+ assert utils._child_exists(row, ".autograde a")
+
+ # refresh and check again
+ utils._load_gradebook_page(browser, port, "manage_submissions/ps2")
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".student-name").text == "B, Ben"
+ assert row.find_element_by_css_selector(".student-id").text == "Bitdiddle"
+ assert row.find_element_by_css_selector(".timestamp").text == "2017-07-05 18:05:21 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "graded"
+ assert row.find_element_by_css_selector(".score").text == "0 / 6"
+ assert utils._child_exists(row, ".autograde a")
+
+ # overwrite the file
+ source_path = join(os.path.dirname(__file__), "..", "..", "docs", "source", "user_guide", "source", "ps1", "problem1.ipynb")
+ shutil.copy(source_path, join("submitted", "Bitdiddle", "ps2", "Problem 1.ipynb"))
+
+ # click on the autograde button
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ row.find_element_by_css_selector(".autograde a").click()
+ utils._wait_for_element(browser, "success-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#success-modal button.btn-primary")))
+ utils._click_element(browser, "#success-modal .close")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#success-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".student-name").text == "B, Ben"
+ assert row.find_element_by_css_selector(".student-id").text == "Bitdiddle"
+ assert row.find_element_by_css_selector(".timestamp").text == "2017-07-05 18:05:21 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "needs manual grading"
+ assert row.find_element_by_css_selector(".score").text == "3 / 6"
+ assert utils._child_exists(row, ".autograde a")
+
+ # refresh and check again
+ utils._load_gradebook_page(browser, port, "manage_submissions/ps2")
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".student-name").text == "B, Ben"
+ assert row.find_element_by_css_selector(".student-id").text == "Bitdiddle"
+ assert row.find_element_by_css_selector(".timestamp").text == "2017-07-05 18:05:21 {}".format(tz)
+ assert row.find_element_by_css_selector(".status").text == "needs manual grading"
+ assert row.find_element_by_css_selector(".score").text == "3 / 6"
+ assert utils._child_exists(row, ".autograde a")
+
+
[email protected]
+def test_add_new_student(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "manage_students")
+ assert len(browser.find_elements_by_css_selector("tbody tr")) == 3
+
+ # click the "add new assignment" button
+ utils._click_link(browser, "Add new student...")
+ utils._wait_for_element(browser, "add-student-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#add-student-modal button.btn-primary")))
+
+    # set the id, name, and email
+ elem = browser.find_element_by_css_selector("#add-student-modal .id")
+ elem.click()
+ elem.send_keys("ator")
+ elem = browser.find_element_by_css_selector("#add-student-modal .first-name")
+ elem.click()
+ elem.send_keys("Eva Lou")
+ elem = browser.find_element_by_css_selector("#add-student-modal .last-name")
+ elem.click()
+ elem.send_keys("Ator")
+ elem = browser.find_element_by_css_selector("#add-student-modal .email")
+ elem.click()
+ elem.send_keys("[email protected]")
+
+ # click save and wait for the modal to close
+ utils._click_element(browser, "#add-student-modal .save")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#add-student-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # wait until both rows are present
+ rows_present = lambda browser: len(browser.find_elements_by_css_selector("tbody tr")) == 4
+ WebDriverWait(browser, 10).until(rows_present)
+
+ # check that the new row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".name").text == "Ator, Eva Lou"
+ assert row.find_element_by_css_selector(".id").text == "ator"
+ assert row.find_element_by_css_selector(".email").text == "[email protected]"
+ assert row.find_element_by_css_selector(".score").text == "0 / 15"
+ assert utils._child_exists(row, ".edit a")
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "manage_students")
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".name").text == "Ator, Eva Lou"
+ assert row.find_element_by_css_selector(".id").text == "ator"
+ assert row.find_element_by_css_selector(".email").text == "[email protected]"
+ assert row.find_element_by_css_selector(".score").text == "0 / 15"
+ assert utils._child_exists(row, ".edit a")
+
+
[email protected]
+def test_edit_student(browser, port, gradebook):
+ utils._load_gradebook_page(browser, port, "manage_students")
+
+ # click on the edit button
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ row.find_element_by_css_selector(".edit a").click()
+ utils._wait_for_element(browser, "edit-student-modal")
+ WebDriverWait(browser, 10).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#edit-student-modal button.btn-primary")))
+
+    # modify the email
+ elem = browser.find_element_by_css_selector("#edit-student-modal .modal-email")
+ elem.clear()
+ elem.click()
+ elem.send_keys("[email protected]")
+
+ # click save and wait for the modal to close
+ utils._click_element(browser, "#edit-student-modal .save")
+ modal_not_present = lambda browser: browser.execute_script("""return $("#edit-student-modal").length === 0;""")
+ WebDriverWait(browser, 10).until(modal_not_present)
+
+ # check that the modified row is correct
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".name").text == "Ator, Eva Lou"
+ assert row.find_element_by_css_selector(".id").text == "ator"
+ assert row.find_element_by_css_selector(".email").text == "[email protected]"
+ assert row.find_element_by_css_selector(".score").text == "0 / 15"
+ assert utils._child_exists(row, ".edit a")
+
+ # reload the page and make sure everything is still correct
+ utils._load_gradebook_page(browser, port, "manage_students")
+ row = browser.find_elements_by_css_selector("tbody tr")[0]
+ assert row.find_element_by_css_selector(".name").text == "Ator, Eva Lou"
+ assert row.find_element_by_css_selector(".id").text == "ator"
+ assert row.find_element_by_css_selector(".email").text == "[email protected]"
+ assert row.find_element_by_css_selector(".score").text == "0 / 15"
+ assert utils._child_exists(row, ".edit a")
diff --git a/nbgrader/tests/nbextensions/test_validate_assignment.py b/nbgrader/tests/nbextensions/test_validate_assignment.py
index 9a300d1b..4b5f32ae 100644
--- a/nbgrader/tests/nbextensions/test_validate_assignment.py
+++ b/nbgrader/tests/nbextensions/test_validate_assignment.py
@@ -5,6 +5,30 @@ from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
+from .conftest import _make_nbserver, _make_browser, _close_nbserver, _close_browser
+
+
[email protected](scope="module")
+def nbserver(request, port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache):
+ server = _make_nbserver("", port, tempdir, jupyter_config_dir, jupyter_data_dir, exchange, cache)
+
+ def fin():
+ _close_nbserver(server)
+ request.addfinalizer(fin)
+
+ return server
+
+
[email protected]
+def browser(request, tempdir, nbserver):
+ browser = _make_browser(tempdir)
+
+ def fin():
+ _close_browser(browser)
+ request.addfinalizer(fin)
+
+ return browser
+
def _wait(browser):
return WebDriverWait(browser, 30)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 10
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@1af1162f65ab38a7e4d55f1c0c1daf55ff8f5250#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_assignment",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_notebooks",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_submission"
]
| [
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_autograded_students",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_autograded_students_no_timestamps",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_submissions",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_notebook_submissions",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_student_notebook_submissions"
]
| [
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_source_assignments",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_released_assignments",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_submitted_students",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_submitted_timestamp",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_assignments",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_filter_existing_notebooks",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_notebook_submission_indices",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_student",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_students",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_get_student_submissions",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_assign",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_release_and_unrelease",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_collect",
"nbgrader/tests/apps/test_api.py::TestNbGraderAPI::test_autograde"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,435 | [
"nbgrader/apps/api.py",
"nbgrader/server_extensions/formgrader/templates/gradebook_assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/utils.js",
"nbgrader/utils.py",
"nbgrader/server_extensions/formgrader/static/js/manage_students.js",
"nbgrader/server_extensions/formgrader/templates/gradebook_base.tpl",
"nbgrader/server_extensions/formgrader/templates/gradebook_notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/static/js/models.js",
"nbgrader/server_extensions/formgrader/templates/manage_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/gradebook_500.tpl",
"nbgrader/server_extensions/formgrader/static/js/formgrade.js",
"nbgrader/server_extensions/formgrader/apihandlers.py",
"nbgrader/server_extensions/formgrader/static/js/gradebook_notebook_submissions.js",
"nbgrader/server_extensions/formgrader/handlers.py",
"nbgrader/server_extensions/formgrader/templates/manage_students_base.tpl",
"nbgrader/server_extensions/formgrader/templates/base.tpl",
"nbgrader/server_extensions/formgrader/static/css/nbgrader.css",
"nbgrader/server_extensions/formgrader/templates/students.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_students_assignments.js",
"nbgrader/server_extensions/formgrader/templates/student_assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/keyboardmanager.js",
"nbgrader/server_extensions/formgrader/templates/student_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/manage_students.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_students_notebook_submissions.js",
"nbgrader/server_extensions/formgrader/static/js/manage_assignments.js",
"nbgrader/server_extensions/formgrader/static/js/backbone_xsrf.js",
"nbgrader/server_extensions/formgrader/templates/manage_students_notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/formgrader.py",
"nbgrader/server_extensions/formgrader/templates/gradebook_notebooks.tpl",
"nbgrader/server_extensions/formgrader/base.py",
"nbgrader/server_extensions/formgrader/templates/assignment_notebooks.tpl",
"nbgrader/server_extensions/formgrader/templates/gradebook.tpl",
"nbgrader/server_extensions/formgrader/static/js/gradebook_notebooks.js",
"nbgrader/server_extensions/formgrader/static/js/gradebook_assignments.js",
"nbgrader/server_extensions/formgrader/templates/gradebook_403.tpl",
"nbgrader/server_extensions/formgrader/templates/assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_submissions.js",
"nbgrader/server_extensions/formgrader/templates/manage_assignments.tpl",
"nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl",
"nbgrader/server_extensions/formgrader/templates/manage_students_assignments.tpl"
]
| [
"nbgrader/apps/api.py",
"nbgrader/server_extensions/formgrader/templates/gradebook_assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/utils.js",
"nbgrader/utils.py",
"nbgrader/server_extensions/formgrader/static/js/manage_students.js",
"nbgrader/server_extensions/formgrader/templates/gradebook_base.tpl",
"nbgrader/server_extensions/formgrader/templates/gradebook_notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/base_500.tpl",
"nbgrader/server_extensions/formgrader/templates/manage_submissions.tpl",
"nbgrader/server_extensions/formgrader/static/js/formgrade.js",
"nbgrader/server_extensions/formgrader/apihandlers.py",
"nbgrader/server_extensions/formgrader/static/js/gradebook_notebook_submissions.js",
"nbgrader/server_extensions/formgrader/handlers.py",
"nbgrader/server_extensions/formgrader/templates/manage_students_base.tpl",
"nbgrader/server_extensions/formgrader/templates/base.tpl",
"nbgrader/server_extensions/formgrader/static/css/nbgrader.css",
"nbgrader/server_extensions/formgrader/templates/students.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_students_assignments.js",
"nbgrader/server_extensions/formgrader/templates/student_assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/formgrade_models.js",
"nbgrader/server_extensions/formgrader/templates/student_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/manage_students.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_students_notebook_submissions.js",
"nbgrader/server_extensions/formgrader/static/js/manage_assignments.js",
"nbgrader/server_extensions/formgrader/static/js/backbone_xsrf.js",
"nbgrader/server_extensions/formgrader/templates/manage_students_notebook_submissions.tpl",
"nbgrader/server_extensions/formgrader/templates/base_403.tpl",
"nbgrader/server_extensions/formgrader/formgrader.py",
"nbgrader/server_extensions/formgrader/templates/gradebook_notebooks.tpl",
"nbgrader/server_extensions/formgrader/base.py",
"nbgrader/server_extensions/formgrader/templates/assignment_notebooks.tpl",
"nbgrader/server_extensions/formgrader/templates/gradebook.tpl",
"nbgrader/server_extensions/formgrader/static/js/formgrade_keyboardmanager.js",
"nbgrader/server_extensions/formgrader/static/js/gradebook_assignments.js",
"nbgrader/server_extensions/formgrader/static/js/gradebook_notebooks.js",
"nbgrader/server_extensions/formgrader/templates/assignments.tpl",
"nbgrader/server_extensions/formgrader/static/js/manage_submissions.js",
"nbgrader/server_extensions/formgrader/templates/manage_assignments.tpl",
"nbgrader/server_extensions/formgrader/templates/formgrade_macros.tpl",
"nbgrader/server_extensions/formgrader/templates/manage_students_assignments.tpl"
]
|
streamlink__streamlink-1070 | 4761570f479ba51ffeb099a4e8a2ed3fea6df72d | 2017-07-06 16:07:35 | 0521ae3ca127f7cc600f1adcbc18b302760889ab | Vangelis66: Many thanks, but where exactly has https://github.com/streamlink/streamlink/issues/1059 gone ???
unlocKing: Looks like #1059 and #1067 have both been deleted
beardypig: Oops forgot to update the tests too...
beardypig: Perhaps the user that created them has been blocked by github for posting URLs... | diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py
index d3f7b075..691d8a2a 100644
--- a/src/streamlink/plugins/app17.py
+++ b/src/streamlink/plugins/app17.py
@@ -68,23 +68,26 @@ class App17(Plugin):
self.logger.info("Stream currently unavailable.")
return
- http_url = _rtmp_re.search(res.text).group(1)
- yield "live", HTTPStream(self.session, http_url)
-
- if 'pull-rtmp' in http_url:
- url = http_url.replace("http:", "rtmp:").replace(".flv", "")
+ url = _rtmp_re.search(res.text).group(1)
+ if 'rtmp:' in url:
stream = RTMPStream(self.session, {
"rtmp": url,
"live": True
})
yield "live", stream
-
- if 'wansu-global-pull-rtmp' in http_url:
- url = http_url.replace(".flv", "/playlist.m3u8")
+ else:
+ yield "live", HTTPStream(self.session, url)
+
+ if '17app.co' in url:
+ prefix = url.replace("rtmp:", "http:").replace(".flv", "/playlist.m3u8")
+ if '/playlist.m3u8' not in prefix:
+ url = prefix + "/playlist.m3u8"
+ else:
+ url = prefix
for stream in HLSStream.parse_variant_playlist(self.session, url).items():
yield stream
else:
- url = http_url.replace(".flv", ".m3u8")
+ url = url.replace(".flv", ".m3u8")
yield "live", HLSStream(self.session, url)
diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py
index ddb45036..4d7a3e9b 100644
--- a/src/streamlink/plugins/hitbox.py
+++ b/src/streamlink/plugins/hitbox.py
@@ -178,7 +178,7 @@ class Hitbox(Plugin):
if not media_id:
res = http.get(LIVE_API.format(channel))
livestream = http.json(res, schema=_live_schema)
- if livestream.get("media_hosted_media"):
+ if livestream["media_hosted_media"]:
hosted = _live_schema.validate(livestream["media_hosted_media"])
self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"])
livestream = hosted
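For reference, the one-line change above swaps the tolerant dictionary lookup for strict indexing, relying on the validated API response always carrying the key; the difference only shows up when it is absent (the dict below is invented for illustration):

livestream = {"media_user_name": "someone"}     # "media_hosted_media" missing
print(livestream.get("media_hosted_media"))     # None -> treated as "not hosting"
try:
    livestream["media_hosted_media"]
except KeyError:
    print("strict indexing raises when the key is missing")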
diff --git a/src/streamlink/plugins/npo.py b/src/streamlink/plugins/npo.py
index 840af212..f04aaade 100644
--- a/src/streamlink/plugins/npo.py
+++ b/src/streamlink/plugins/npo.py
@@ -1,13 +1,8 @@
"""Plugin for NPO: Nederlandse Publieke Omroep
Supports:
- VODs:
- - https://www.npo.nl/nos-journaal/07-07-2017/POW_03375651
- - https://www.zapp.nl/topdoks/gemist/VPWON_1276930
- - https://zappelin.nl/10-voor/gemist/VPWON_1271522
- Live:
- - https://www.npo.nl/live/npo-1
- - https://zappelin.nl/tv-kijken
+ VODs: http://www.npo.nl/het-zandkasteel/POMS_S_NTR_059963
+ Live: http://www.npo.nl/live/nederland-1
"""
import re
@@ -17,14 +12,12 @@ from streamlink.plugin.api import http
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
-from streamlink.stream import HTTPStream
from streamlink.utils import parse_json
class NPO(Plugin):
api_url = "http://ida.omroep.nl/app.php/{endpoint}"
- url_re = re.compile(r"https?://(\w+\.)?(npo\.nl|zapp\.nl|zappelin\.nl)/")
- media_id_re = re.compile(r'''<npo-player\smedia-id=["'](?P<media_id>[^"']+)["']''')
+ url_re = re.compile(r"https?://(\w+\.)?(npo.nl|zapp.nl|zappelin.nl)/")
prid_re = re.compile(r'''(?:data(-alt)?-)?prid\s*[=:]\s*(?P<q>["'])(\w+)(?P=q)''')
react_re = re.compile(r'''data-react-props\s*=\s*(?P<q>["'])(?P<data>.*?)(?P=q)''')
@@ -85,11 +78,6 @@ class NPO(Plugin):
        data = parse_json(m.group("data").replace("&quot;", '"'))
bprid = data.get("mid")
- if bprid is None:
- m = self.media_id_re.search(res.text)
- if m:
- bprid = m.group('media_id')
-
return bprid
def _get_streams(self):
@@ -103,7 +91,7 @@ class NPO(Plugin):
schema=self.streams_schema)
for stream in streams:
- if stream["format"] in ("adaptive", "hls", "mp4"):
+ if stream["format"] in ("adaptive", "hls"):
if stream["contentType"] == "url":
stream_url = stream["url"]
else:
@@ -114,10 +102,8 @@ class NPO(Plugin):
stream_url = http.json(http.get(info_url),
schema=self.stream_info_schema)
- if stream["format"] in ("adaptive", "hls"):
- for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
- yield s
- elif stream["format"] in ("mp3", "mp4"):
- yield "vod", HTTPStream(self.session, stream_url)
+ for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
+ yield s
+
__plugin__ = NPO
diff --git a/src/streamlink/plugins/tvplayer.py b/src/streamlink/plugins/tvplayer.py
index 249e85fa..f79474e6 100644
--- a/src/streamlink/plugins/tvplayer.py
+++ b/src/streamlink/plugins/tvplayer.py
@@ -15,7 +15,7 @@ class TVPlayer(Plugin):
dummy_postcode = "SE1 9LT" # location of ITV HQ in London
url_re = re.compile(r"https?://(?:www.)?tvplayer.com/(:?watch/?|watch/(.+)?)")
- stream_attrs_re = re.compile(r'data-(resource|token)\s*=\s*"(.*?)"', re.S)
+ stream_attrs_re = re.compile(r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)
login_token_re = re.compile(r'input.*?name="token".*?value="(\w+)"')
stream_schema = validate.Schema({
"tvplayer": validate.Schema({
@@ -58,20 +58,22 @@ class TVPlayer(Plugin):
# there is a 302 redirect on a successful login
return res2.status_code == 302
- def _get_stream_data(self, resource, token, service=1):
+ def _get_stream_data(self, resource, channel_id, token, service=1):
# Get the context info (validation token and platform)
self.logger.debug("Getting stream information for resource={0}".format(resource))
context_res = http.get(self.context_url, params={"resource": resource,
"gen": token})
context_data = http.json(context_res, schema=self.context_schema)
+ self.logger.debug("Context data: {0}", str(context_data))
# get the stream urls
res = http.post(self.api_url, data=dict(
service=service,
- id=resource,
+ id=channel_id,
validate=context_data["validate"],
token=context_data.get("token"),
- platform=context_data["platform"]["key"]))
+ platform=context_data["platform"]["key"]),
+ raise_for_status=False)
return http.json(res, schema=self.stream_schema)
@@ -91,7 +93,8 @@ class TVPlayer(Plugin):
data=dict(postcode=self.dummy_postcode),
params=dict(return_url=self.url))
- stream_attrs = dict((k, v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ stream_attrs = dict((k.replace("-", "_"), v.strip('"')) for k, v in self.stream_attrs_re.findall(res.text))
+ self.logger.debug("Got stream attrs: {0}", str(stream_attrs))
if "resource" in stream_attrs and "token" in stream_attrs:
stream_data = self._get_stream_data(**stream_attrs)
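A minimal, self-contained sketch of the attribute scraping introduced above; the HTML fragment is invented for illustration, while the regex and the dash-to-underscore key normalisation come straight from the patch:

import re

# Regex from the patched plugin: "channel-id" is now captured alongside
# "resource" and "token".
stream_attrs_re = re.compile(r'data-(resource|token|channel-id)\s*=\s*"(.*?)"', re.S)

# Invented page fragment, shaped like the markup the updated test mocks.
page = '''
<div data-resource= "bbcone"
     data-token = "1324567894561268987948596154656418448489159"
     data-channel-id= "89"></div>
'''

# Dashes become underscores so the dict can be splatted straight into
# _get_stream_data(resource=..., channel_id=..., token=...).
stream_attrs = dict((k.replace("-", "_"), v.strip('"'))
                    for k, v in stream_attrs_re.findall(page))
print(stream_attrs)
# {'resource': 'bbcone', 'token': '1324...159', 'channel_id': '89'}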
| tvplayer plugin broken
https://tvplayer.com/watch/bbcone
Unable to open URL: http://api.tvplayer.com/api/v2/stream/live (400 Client Error: Bad Request for url: http://api.tvplayer.com/api/v2/stream/live) | streamlink/streamlink | diff --git a/tests/test_plugin_tvplayer.py b/tests/test_plugin_tvplayer.py
index 52f27dc0..f9f13367 100644
--- a/tests/test_plugin_tvplayer.py
+++ b/tests/test_plugin_tvplayer.py
@@ -41,7 +41,7 @@ class TestPluginTVPlayer(unittest.TestCase):
page_resp = Mock()
page_resp.text = u"""
<div class="video-js theoplayer-skin theo-seekbar-above-controls content-box vjs-fluid"
- data-resource= "89"
+ data-resource= "bbcone"
data-token = "1324567894561268987948596154656418448489159"
data-content-type="live"
data-environment="live"
@@ -54,6 +54,7 @@ class TestPluginTVPlayer(unittest.TestCase):
mock_http.get.return_value = page_resp
hlsstream.parse_variant_playlist.return_value = {"test": HLSStream(self.session, "http://test.se/stream1")}
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()
@@ -63,7 +64,7 @@ class TestPluginTVPlayer(unittest.TestCase):
# test the url is used correctly
mock_http.get.assert_called_with("http://tvplayer.com/watch/dave")
# test that the correct API call is made
- mock_get_stream_data.assert_called_with(resource="89", token="1324567894561268987948596154656418448489159")
+ mock_get_stream_data.assert_called_with(resource="bbcone", channel_id="89", token="1324567894561268987948596154656418448489159")
# test that the correct URL is used for the HLSStream
hlsstream.parse_variant_playlist.assert_called_with(ANY, "http://test.se/stream1")
@@ -76,6 +77,7 @@ class TestPluginTVPlayer(unittest.TestCase):
"""
mock_http.get.return_value = page_resp
+ TVPlayer.bind(self.session, "test.plugin.tvplayer")
plugin = TVPlayer("http://tvplayer.com/watch/dave")
streams = plugin.get_streams()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"codecov",
"coverage",
"mock",
"pynsist",
"unittest2"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
distlib==0.3.9
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
iso-639==0.4.5
iso3166==2.1.1
Jinja2==3.0.3
linecache2==1.0.0
MarkupSafe==2.0.1
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycryptodome==3.21.0
pynsist==2.8
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
requests==2.27.1
requests_download==0.1.2
six==1.17.0
-e git+https://github.com/streamlink/streamlink.git@4761570f479ba51ffeb099a4e8a2ed3fea6df72d#egg=streamlink
tomli==1.2.3
traceback2==1.4.0
typing_extensions==4.1.1
unittest2==1.1.0
urllib3==1.26.20
yarg==0.1.10
zipp==3.6.0
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- distlib==0.3.9
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- iso-639==0.4.5
- iso3166==2.1.1
- jinja2==3.0.3
- linecache2==1.0.0
- markupsafe==2.0.1
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycryptodome==3.21.0
- pynsist==2.8
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- requests==2.27.1
- requests-download==0.1.2
- six==1.17.0
- tomli==1.2.3
- traceback2==1.4.0
- typing-extensions==4.1.1
- unittest2==1.1.0
- urllib3==1.26.20
- yarg==0.1.10
- zipp==3.6.0
prefix: /opt/conda/envs/streamlink
| [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_streams"
]
| []
| [
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_can_handle_url",
"tests/test_plugin_tvplayer.py::TestPluginTVPlayer::test_get_invalid_page"
]
| []
| BSD 2-Clause "Simplified" License | 1,437 | [
"src/streamlink/plugins/npo.py",
"src/streamlink/plugins/app17.py",
"src/streamlink/plugins/hitbox.py",
"src/streamlink/plugins/tvplayer.py"
]
| [
"src/streamlink/plugins/npo.py",
"src/streamlink/plugins/app17.py",
"src/streamlink/plugins/hitbox.py",
"src/streamlink/plugins/tvplayer.py"
]
|
jd__daiquiri-14 | dd83419910bd6e6e156332fe63b8eda599721ba8 | 2017-07-06 20:36:34 | dd83419910bd6e6e156332fe63b8eda599721ba8 | diff --git a/daiquiri/__init__.py b/daiquiri/__init__.py
index 74e5001..0254f57 100644
--- a/daiquiri/__init__.py
+++ b/daiquiri/__init__.py
@@ -79,7 +79,7 @@ def setup(level=logging.WARNING, outputs=[output.STDERR], program_name=None,
:param level: Root log level.
:param outputs: Iterable of outputs to log to.
:param program_name: The name of the program. Auto-detected if not set.
- :param capture_warnings: Capture warnings from the `warnings' module
+ :param capture_warnings: Capture warnings from the `warnings' module.
"""
root_logger = logging.getLogger(None)
diff --git a/daiquiri/output.py b/daiquiri/output.py
index a9dc088..ca0e858 100644
--- a/daiquiri/output.py
+++ b/daiquiri/output.py
@@ -9,8 +9,11 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import inspect
import logging
+import logging.handlers
+import numbers
import os
import sys
try:
@@ -41,6 +44,26 @@ class Output(object):
logger.addHandler(self.handler)
+def _get_log_file_path(logfile=None, logdir=None, program_name=None,
+ logfile_suffix=".log"):
+ ret_path = None
+
+ if not logdir:
+ ret_path = logfile
+
+ if not ret_path and logfile and logdir:
+ ret_path = os.path.join(logdir, logfile)
+
+ if not ret_path and logdir:
+ program_name = program_name or get_program_name()
+ ret_path = os.path.join(logdir, program_name) + logfile_suffix
+
+ if not ret_path:
+ raise ValueError("Unable to determine log file destination")
+
+ return ret_path
+
+
class File(Output):
def __init__(self, filename=None, directory=None, suffix=".log",
program_name=None, formatter=formatter.TEXT_FORMATTER,
@@ -56,24 +79,87 @@ class File(Output):
This will be only used if no filename has been provided.
:param program_name: Program name. Autodetected by default.
"""
- logpath = self._get_log_file_path(filename, directory, program_name)
- if not logpath:
- raise ValueError("Unable to determine log file destination")
+ logpath = _get_log_file_path(filename, directory,
+ program_name, suffix)
handler = logging.handlers.WatchedFileHandler(logpath)
super(File, self).__init__(handler, formatter, level)
- @staticmethod
- def _get_log_file_path(logfile=None, logdir=None, program_name=None,
- logfile_suffix=".log"):
- if not logdir:
- return logfile
- if logfile and logdir:
- return os.path.join(logdir, logfile)
+class RotatingFile(Output):
+ def __init__(self, filename=None, directory=None, suffix='.log',
+ program_name=None, formatter=formatter.TEXT_FORMATTER,
+ level=None, max_size_bytes=0, backup_count=0):
+ """Rotating log file output.
+
+ :param filename: The log file path to write to.
+ If directory is also specified, both will be combined.
+ :param directory: The log directory to write to.
+ If no filename is specified, the program name and suffix will be used
+        to construct the full path relative to the directory.
+ :param suffix: The log file name suffix.
+ This will be only used if no filename has been provided.
+ :param program_name: Program name. Autodetected by default.
+ :param max_size_bytes: allow the file to rollover at a
+ predetermined size.
+ :param backup_count: the maximum number of files to rotate
+ logging output between.
+ """
+ logpath = _get_log_file_path(filename, directory,
+ program_name, suffix)
+ handler = logging.handlers.RotatingFileHandler(
+ logpath, maxBytes=max_size_bytes, backupCount=backup_count)
+ super(RotatingFile, self).__init__(handler, formatter, level)
+
+ def do_rollover(self):
+ """Manually forces a log file rotation."""
+ return self.handler.doRollover()
+
+
+class TimedRotatingFile(Output):
+ def __init__(self, filename=None, directory=None, suffix='.log',
+ program_name=None, formatter=formatter.TEXT_FORMATTER,
+ level=None, interval=datetime.timedelta(hours=24),
+ backup_count=0):
+ """Rotating log file output, triggered by a fixed interval.
+
+ :param filename: The log file path to write to.
+ If directory is also specified, both will be combined.
+ :param directory: The log directory to write to.
+ If no filename is specified, the program name and suffix will be used
+ to contruct the full path relative to the directory.
+ :param suffix: The log file name suffix.
+ This will be only used if no filename has been provided.
+ :param program_name: Program name. Autodetected by default.
+ :param interval: datetime.timedelta instance representing
+ how often a new log file should be created.
+ :param backup_count: the maximum number of files to rotate
+ logging output between.
+ """
+ logpath = _get_log_file_path(filename, directory,
+ program_name, suffix)
+ handler = logging.handlers.TimedRotatingFileHandler(
+ logpath,
+ when='S',
+ interval=self._timedelta_to_seconds(interval),
+ backupCount=backup_count)
+ super(TimedRotatingFile, self).__init__(handler, formatter, level)
+
+ def do_rollover(self):
+ """Manually forces a log file rotation."""
+ return self.handler.doRollover()
+
+ @staticmethod
+ def _timedelta_to_seconds(td):
+ """Convert a datetime.timedelta object into a seconds interval for
+        rotating file output.
- if logdir:
- program_name = program_name or get_program_name()
- return os.path.join(logdir, program_name) + logfile_suffix
+ :param td: datetime.timedelta
+ :return: time in seconds
+ :rtype: int
+ """
+ if isinstance(td, numbers.Real):
+ td = datetime.timedelta(seconds=td)
+ return td.total_seconds()
class Stream(Output):
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 31db38e..bdf8ae0 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -34,7 +34,7 @@ Usage
=====
The basic usage of daiquiri is to call the `daiquiri.setup` function that will
-setup logging with the options passed as keyword arguments. If no argument are
+setup logging with the options passed as keyword arguments. If no arguments are
passed, the default will log to `stderr`. If `stderr` is a terminal, the output
will use colors.
@@ -42,7 +42,7 @@ will use colors.
You can specify different outputs with different formatters. The
`daiquiri.output` module provides a collection of `Output` classes that you can
-use to your liking to configut the logging output. Any number of output can bex
+use to your liking to configure the logging output. Any number of output can be
configured.
.. literalinclude:: ../../examples/output.py
@@ -51,8 +51,8 @@ configured.
Picking format
--------------
-You can configure the format of any output by passing a formatter to as the
-`formatter` argument to the contructor. Two default formatter are available:
+You can configure the format of any output by passing a formatter as the
+`formatter` argument to the contructor. Two default formatters are available:
`daiquiri.formatter.TEXT_FORMATTER` which prints log messages as text, and the
`daiquiri.formatter.JSON_FORMATTER` which prints log messages as parsable JSON
(requires `python-json-logger`).
@@ -64,7 +64,7 @@ You can provide any class of type `logging.Formatter` as a formatter.
Python warning support
----------------------
-The Python `warnings` module is sometimes used by applications and library to
+The Python `warnings` module is sometimes used by applications and libraries to
emit warnings. By default, they are printed on `stderr`. Daiquiri overrides
this by default and log warnings to the `py.warnings` logger.
@@ -92,7 +92,7 @@ Systemd journal support
-----------------------
The `daiquiri.output.Journal` output provides systemd journal support. All the
-extra argument passed to the logger will be shipped as extra keys to the
+extra arguments passed to the logger will be shipped as extra keys to the
journal.
@@ -100,3 +100,11 @@ File support
------------
The `daiquiri.output.File` output class provides support to log into a file.
+
+`daiquiri.output.RotatingFile` class logs to a file that rotates when a
+maximum file size has been reached.
+
+`daiquiri.output.TimedRotatingFile` will rotate the log file on a fixed
+interval.
+
+.. literalinclude:: ../../examples/files.py
\ No newline at end of file
diff --git a/examples/files.py b/examples/files.py
new file mode 100644
index 0000000..96903d5
--- /dev/null
+++ b/examples/files.py
@@ -0,0 +1,19 @@
+import daiquiri
+import datetime
+import logging
+
+daiquiri.setup(
+ level=logging.DEBUG,
+ outputs=(
+ daiquiri.output.File('errors.log', level=logging.ERROR),
+ daiquiri.output.TimedRotatingFile(
+ 'everything.log',
+ level=logging.DEBUG,
+ interval=datetime.timedelta(hours=1))
+ )
+)
+
+logger = daiquiri.getLogger(__name__)
+
+logger.info('only to rotating file logger')
+logger.error('both log files, including errors only')
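The example file above only exercises the time-based handler; a size-based configuration under the same new API would look roughly like this (the file name and limits are made up):

import logging
import daiquiri

daiquiri.setup(
    level=logging.DEBUG,
    outputs=(
        # Rotate once the file reaches ~1 MiB, keeping five old copies.
        daiquiri.output.RotatingFile(
            'app.log',
            max_size_bytes=1024 * 1024,
            backup_count=5),
    )
)

daiquiri.getLogger(__name__).info('rotates by size rather than by interval')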
diff --git a/tox.ini b/tox.ini
index f3ac5a9..6e88a61 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,6 +7,7 @@ deps = -e.[test,json]
commands =
python setup.py test --slowest --testr-args='{posargs}'
sh -c "for example in examples/*.py; do python $example; done"
+ sh -c "rm errors.log everything.log"
[testenv:pep8]
deps =
| Rotating logs?
Can this module configure a rotating file log?
logzero does it: http://logzero.readthedocs.io/en/latest/index.html?highlight=rotating#i-logzero-logfile | jd/daiquiri | diff --git a/daiquiri/tests/test_output.py b/daiquiri/tests/test_output.py
index 82773b8..dc7fa38 100644
--- a/daiquiri/tests/test_output.py
+++ b/daiquiri/tests/test_output.py
@@ -9,6 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+from datetime import timedelta
import syslog
import testtools
@@ -31,18 +32,42 @@ class TestOutput(testtools.TestCase):
def test_get_log_file_path(self):
self.assertEqual("foobar.log",
- output.File._get_log_file_path("foobar.log"))
+ output._get_log_file_path("foobar.log"))
self.assertEqual("/var/log/foo/foobar.log",
- output.File._get_log_file_path("foobar.log",
- logdir="/var/log/foo"))
+ output._get_log_file_path("foobar.log",
+ logdir="/var/log/foo"))
self.assertEqual("/var/log/foobar.log",
- output.File._get_log_file_path(logdir="/var/log",
- program_name="foobar"))
+ output._get_log_file_path(logdir="/var/log",
+ program_name="foobar"))
self.assertEqual("/var/log/foobar.log",
- output.File._get_log_file_path(logdir="/var/log",
- program_name="foobar"))
+ output._get_log_file_path(logdir="/var/log",
+ program_name="foobar"))
self.assertEqual("/var/log/foobar.journal",
- output.File._get_log_file_path(
+ output._get_log_file_path(
logdir="/var/log",
logfile_suffix=".journal",
program_name="foobar"))
+
+ def test_timedelta_seconds(self):
+ fn = output.TimedRotatingFile._timedelta_to_seconds
+ hour = 60 * 60 # seconds * minutes
+
+ one_hour = [
+ timedelta(hours=1),
+ timedelta(minutes=60),
+ timedelta(seconds=hour),
+ hour,
+ float(hour)
+ ]
+ for t in one_hour:
+ self.assertEqual(hour, fn(t))
+
+ error_cases = [
+ 'string',
+ ['some', 'list'],
+ ('some', 'tuple',),
+ ('tuple',),
+ {'dict': 'mapping'}
+ ]
+ for t in error_cases:
+ self.assertRaises(AttributeError, fn, t)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test,json]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"testrepository",
"testtools",
"six"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/jd/daiquiri.git@dd83419910bd6e6e156332fe63b8eda599721ba8#egg=daiquiri
extras==1.0.0
fixtures==4.0.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso8601==1.1.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pbr==6.1.1
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
python-json-logger==2.0.7
python-subunit==1.4.2
six==1.17.0
testrepository==0.0.21
testtools==2.6.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: daiquiri
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- extras==1.0.0
- fixtures==4.0.1
- iso8601==1.1.0
- pbr==6.1.1
- python-json-logger==2.0.7
- python-subunit==1.4.2
- six==1.17.0
- testrepository==0.0.21
- testtools==2.6.0
prefix: /opt/conda/envs/daiquiri
| [
"daiquiri/tests/test_output.py::TestOutput::test_get_log_file_path",
"daiquiri/tests/test_output.py::TestOutput::test_timedelta_seconds"
]
| []
| [
"daiquiri/tests/test_output.py::TestOutput::test_find_facility"
]
| []
| Apache License 2.0 | 1,438 | [
"daiquiri/__init__.py",
"daiquiri/output.py",
"examples/files.py",
"tox.ini",
"doc/source/index.rst"
]
| [
"daiquiri/__init__.py",
"daiquiri/output.py",
"examples/files.py",
"tox.ini",
"doc/source/index.rst"
]
|
|
vertexproject__synapse-331 | 178f474d2cb47ab261cb3cdb8249f0f353e8e1c9 | 2017-07-06 23:03:05 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/lib/storm.py b/synapse/lib/storm.py
index 376aee35f..8f49cdd36 100644
--- a/synapse/lib/storm.py
+++ b/synapse/lib/storm.py
@@ -1154,20 +1154,7 @@ class Runtime(Configable):
raise s_common.BadSyntaxError(name=prop, mesg=mesg)
continue # pragma: no cover
- if prop.startswith(forms):
- valid = False
- for form in forms:
- if prop.startswith(form + ':') and core.isSetPropOk(prop):
- _prop = prop[len(form) + 1:]
- formprops[form][_prop] = valu
- valid = True
- break
- if not valid:
- mesg = 'Full prop is not valid on any lifted forms.'
- raise s_common.BadSyntaxError(name=prop, mesg=mesg)
- continue # pragma: no cover
-
- mesg = 'setprop operator requires props to start with relative or full prop names.'
+ mesg = 'setprop operator requires props to start with relative prop names.'
raise s_common.BadSyntaxError(name=prop, mesg=mesg)
for form, nodes in formnodes.items():
| setprop() should take both full props and rel props
```
setprop(foo:bar:baz=10)
- or -
setprop(:baz=10)
```
rather than
```
setprop(baz=10)
``` | vertexproject/synapse | diff --git a/synapse/tests/test_lib_storm.py b/synapse/tests/test_lib_storm.py
index 1cede3be2..e23984862 100644
--- a/synapse/tests/test_lib_storm.py
+++ b/synapse/tests/test_lib_storm.py
@@ -52,54 +52,23 @@ class StormTest(SynTest):
self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
self.eq(node[1].get('inet:netuser:realname'), 'robert gray')
- # Full prop val syntax
- node = core.eval('inet:netuser=vertex.link/pennywise setprop(inet:netuser:signup="1970-01-01")')[0]
- self.eq(node[1].get('inet:netuser'), 'vertex.link/pennywise')
- self.eq(node[1].get('inet:netuser:signup'), 0)
-
- # Combined syntax using both relative props and full props together
- cmd = 'inet:netuser=vertex.link/pennywise setprop(:seen:min="2000", :seen:max="2017", ' \
- 'inet:netuser:[email protected], inet:netuser:signup:ipv4="127.0.0.1")'
+ # Can set multiple props at once
+ cmd = 'inet:netuser=vertex.link/pennywise setprop(:seen:min="2000", :seen:max="2017")'
node = core.eval(cmd)[0]
self.nn(node[1].get('inet:netuser:seen:min'))
self.nn(node[1].get('inet:netuser:seen:max'))
- self.nn(node[1].get('inet:netuser:signup:ipv4'))
- self.eq(node[1].get('inet:netuser:email'), '[email protected]')
# old / bad syntax fails
# kwlist key/val syntax is no longer valid in setprop()
node = core.formTufoByProp('inet:fqdn', 'vertex.link')
bad_cmd = 'inet:fqdn=vertex.link setprop(created="2016-05-05",updated="2017/05/05")'
self.raises(BadSyntaxError, core.eval, bad_cmd)
- # a full prop which isn't valid for the node is bad
- bad_cmd = 'inet:fqdn=vertex.link setprop(inet:fqdn:typocreated="2016-05-05")'
- self.raises(BadSyntaxError, core.eval, bad_cmd)
# a rel prop which isn't valid for the node is bad
bad_cmd = 'inet:fqdn=vertex.link setprop(:typocreated="2016-05-05")'
self.raises(BadSyntaxError, core.eval, bad_cmd)
-
- # test possible form confusion
- modl = {
- 'types': (
- ('foo:bar', {'subof': 'str'}),
- ('foo:barbaz', {'subof': 'str'})
- ),
- 'forms': (
- ('foo:bar', {'ptype': 'str'}, [
- ('blah', {'ptype': 'str'})
- ]),
- ('foo:barbaz', {'ptype': 'str'}, [
- ('blah', {'ptype': 'str'})
- ]),
- )
- }
- core.addDataModel('form_confusion', modl)
- node = core.formTufoByProp('foo:bar', 'hehe')
- core.addTufoTag(node, 'confusion')
- node = core.formTufoByProp('foo:barbaz', 'haha')
- core.addTufoTag(node, 'confusion')
- node = core.eval('''#confusion setprop(foo:barbaz:blah=duck) +foo:barbaz''')[0]
- self.eq(node[1].get('foo:barbaz:blah'), 'duck')
+ # full prop syntax is not acceptable
+ bad_cmd = 'inet:netuser=vertex.link/pennywise setprop(inet:netuser:signup="1970-01-01")'
+ self.raises(BadSyntaxError, core.eval, bad_cmd)
def test_storm_filt_regex(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@178f474d2cb47ab261cb3cdb8249f0f353e8e1c9#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_setprop"
]
| []
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_alltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_cmpr_norm",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode_caching",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_deltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_edit_end",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_editmode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_filt_regex",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lift",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lifts_by",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_pivot",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_refs",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_show_help",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_fromtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_glob",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_ival",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_jointag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_query",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_totag",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior_negatives",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_default"
]
| []
| Apache License 2.0 | 1,439 | [
"synapse/lib/storm.py"
]
| [
"synapse/lib/storm.py"
]
|
|
networkx__networkx-2497 | 26e6efcb88e132ff76130531ca7e88583f4af9e2 | 2017-07-07 02:08:51 | 3f4fd85765bf2d88188cfd4c84d0707152e6cd1e | dschult: It seems to work in Python 3.6; I get the error only for Python 2.7, though I haven't tested all the versions. I'm assuming it is a Python 2 vs Python 3 issue.
In the test you are processing all strings through ```literal_stringizer``` instead of letting str and unicode be unprocessed. I guess this fixes the int node name ```0``` to ```"0"``` but now the test fails on the attribute 'demo' having a unicode value because ```literal_stringizer``` in python2.7 doesn't convert the unicode symbol to HTML correctly. In the old tests, strings were not processed by ```literal_stringizer```. So this wasn't a problem.
We might need a separate section just for labels. Instead of calling literal_stringizer on all strings AND on labels, you would just call it on the labels. I think that will make this test work but is kind of a hack because it's not clear how to handle labels that are unicode strings. Perhaps just send them through without processing and apply ```literal_stringizer``` to the numeric types. You could hardcode the stringizer to use or make literal_stringizer the default.
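Roughly what I mean, as a Python 3 sketch (illustrative only: `stringize_label` is a made-up helper, not something in the gml module; it assumes `literal_stringizer` is importable from `networkx.readwrite.gml`, and under Python 2 the isinstance check would be `(str, unicode)`):

```python
from networkx.readwrite.gml import literal_stringizer


def stringize_label(value, stringizer=literal_stringizer):
    """Return a GML-ready label string.

    Plain string labels pass through untouched; numeric and other
    non-string labels go through the stringizer, so the writer can
    always emit them as quoted text.
    """
    if isinstance(value, str):
        return value
    return stringizer(value)


print(stringize_label('n0'))  # 'n0' -- left alone
print(stringize_label(1203))  # '1203' -- now a string, so it can be written as label "1203"
```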
Perhaps we should add a test where a node is a unicode string.
Does this make any sense? | diff --git a/CONTRIBUTE.rst b/CONTRIBUTE.rst
new file mode 100644
index 000000000..b8ef591d0
--- /dev/null
+++ b/CONTRIBUTE.rst
@@ -0,0 +1,161 @@
+Development process
+-------------------
+
+1. If you are a first-time contributor:
+
+ * Go to `https://github.com/networkx/networkx
+ <https://github.com/networkx/networkx>`_ and click the
+ "fork" button to create your own copy of the project.
+
+ * Clone the project to your local computer::
+
+ git clone [email protected]:your-username/networkx.git
+
+ * Add the upstream repository::
+
+ git remote add upstream [email protected]:networkx/networkx.git
+
+ * Now, you have remote repositories named:
+
+ - ``upstream``, which refers to the ``networkx`` repository
+ - ``origin``, which refers to your personal fork
+
+2. Develop your contribution:
+
+ * Pull the latest changes from upstream::
+
+ git checkout master
+ git pull upstream master
+
+ * Create a branch for the feature you want to work on. Since the
+ branch name will appear in the merge message, use a sensible name
+ such as 'bugfix-for-issue-1480'::
+
+ git checkout -b bugfix-for-issue-1480
+
+ * Commit locally as you progress (``git add`` and ``git commit``)
+
+3. To submit your contribution:
+
+ * Push your changes back to your fork on GitHub::
+
+ git push origin bugfix-for-issue-1480
+
+ * Go to GitHub. The new branch will show up with a green Pull Request
+ button---click it.
+
+ * If you want, post on the `mailing list
+ <http://groups.google.com/group/networkx-discuss>`_ to explain your changes or
+ to ask for review.
+
+For a more detailed discussion, read these :doc:`detailed documents
+<gitwash/index>` on how to use Git with ``networkx``
+(`<https://networkx.readthedocs.io/en/stable/developer/gitwash/index.html>`_).
+
+4. Review process:
+
+ * Reviewers (the other developers and interested community members) will
+ write inline and/or general comments on your Pull Request (PR) to help
+ you improve its implementation, documentation, and style. Every single
+ developer working on the project has their code reviewed, and we've come
+ to see it as friendly conversation from which we all learn and the
+ overall code quality benefits. Therefore, please don't let the review
+ discourage you from contributing: its only aim is to improve the quality
+ of project, not to criticize (we are, after all, very grateful for the
+ time you're donating!).
+
+ * To update your pull request, make your changes on your local repository
+ and commit. As soon as those changes are pushed up (to the same branch as
+ before) the pull request will update automatically.
+
+ * `Travis-CI <http://travis-ci.org/>`__, a continuous integration service,
+ is triggered after each Pull Request update to build the code and run unit
+ tests of your branch. The Travis tests must pass before your PR can be merged.
+ If Travis fails, you can find out why by clicking on the "failed" icon (red
+ cross) and inspecting the build and test log.
+
+ * `AppVeyor <http://ci.appveyor.com>`__, is another continuous integration
+ service, which we use. You will also need to make sure that the AppVeyor
+ tests pass.
+
+.. note::
+
+ If closing a bug, also add "Fixes #1480" where 1480 is the issue number.
+
+Divergence between ``upstream master`` and your feature branch
+--------------------------------------------------------------
+
+Never merge the main branch into yours. If GitHub indicates that the
+branch of your Pull Request can no longer be merged automatically, rebase
+onto master::
+
+ git checkout master
+ git pull upstream master
+ git checkout bugfix-for-issue-1480
+ git rebase master
+
+If any conflicts occur, fix the according files and continue::
+
+ git add conflict-file1 conflict-file2
+ git rebase --continue
+
+However, you should only rebase your own branches and must generally not
+rebase any branch which you collaborate on with someone else.
+
+Finally, you must push your rebased branch::
+
+ git push --force origin bugfix-for-issue-1480
+
+(If you are curious, here's a further discussion on the
+`dangers of rebasing <http://tinyurl.com/lll385>`__.
+Also see this `LWN article <http://tinyurl.com/nqcbkj>`__.)
+
+Guidelines
+----------
+
+* All code should have tests.
+* All code should be documented, to the same
+ `standard <http://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt#docstring-standard>`__
+ as NumPy and SciPy.
+* No changes are committed without review. Ask on the
+ `mailing list <http://groups.google.com/group/networkx-discuss>`_ if
+ you get no response to your pull request.
+
+Stylistic Guidelines
+--------------------
+
+* Set up your editor to remove trailing whitespace. Follow `PEP08
+ <www.python.org/dev/peps/pep-0008/>`__. Check code with pyflakes / flake8.
+
+* Use the following import conventions::
+
+ import numpy as np
+ import scipy as sp
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ import networkx as nx
+
+ cimport numpy as cnp # in Cython code
+
+Pull request codes
+------------------
+
+When you submit a pull request to github, github will ask you for a summary. If
+your code is not ready to merge, but you want to get feedback, please consider
+using ``WIP: experimental optimization`` or similar for the title of your pull
+request. That way we will all know that it's not yet ready to merge and that
+you may be interested in more fundamental comments about design.
+
+When you think the pull request is ready to merge, change the title (using the
+*Edit* button) to remove the ``WIP:``.
+
+Developer Notes
+---------------
+
+For additional information about contributing to NetworkX, please see
+the `Developer Notes <https://github.com/networkx/networkx/wiki>`_.
+
+Bugs
+----
+
+Please `report bugs on GitHub <https://github.com/networkx/networkx/issues>`_.
diff --git a/INSTALL.txt b/INSTALL.txt
index 0df6ae25f..bbfc5c632 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -1,3 +1,3 @@
See doc/source/install.rst
or
-http://networkx.github.io/documentation/latest/install.html
+http://networkx.readthedocs.io/en/stable/install.html
diff --git a/appveyor.yml b/appveyor.yml
index f53602bd5..cc5ffc826 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -11,11 +11,11 @@ environment:
matrix:
- PYTHON: "C:\\Python27"
- PYTHON_VERSION: "2.7.9"
+ PYTHON_VERSION: "2.7.13"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python27-x64"
- PYTHON_VERSION: "2.7.9"
+ PYTHON_VERSION: "2.7.13"
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python33"
@@ -27,19 +27,27 @@ environment:
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python34"
- PYTHON_VERSION: "3.4.3"
+ PYTHON_VERSION: "3.4.4"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python34-x64"
- PYTHON_VERSION: "3.4.3"
+ PYTHON_VERSION: "3.4.4"
PYTHON_ARCH: "64"
- PYTHON: "C:\\Python35"
- PYTHON_VERSION: "3.5.0"
+ PYTHON_VERSION: "3.5.3"
PYTHON_ARCH: "32"
- PYTHON: "C:\\Python35-x64"
- PYTHON_VERSION: "3.5.0"
+ PYTHON_VERSION: "3.5.3"
+ PYTHON_ARCH: "64"
+
+ - PYTHON: "C:\\Python36"
+ PYTHON_VERSION: "3.6.1"
+ PYTHON_ARCH: "32"
+
+ - PYTHON: "C:\\Python36-x64"
+ PYTHON_VERSION: "3.6.1"
PYTHON_ARCH: "64"
install:
diff --git a/doc/source/developer/contribute.rst b/doc/source/developer/contribute.rst
new file mode 100644
index 000000000..eccd2e4cc
--- /dev/null
+++ b/doc/source/developer/contribute.rst
@@ -0,0 +1,1 @@
+.. include:: ../../../CONTRIBUTE.rst
diff --git a/doc/source/developer/index.rst b/doc/source/developer/index.rst
index 515d32bcb..040b5a1c5 100644
--- a/doc/source/developer/index.rst
+++ b/doc/source/developer/index.rst
@@ -6,4 +6,5 @@ Developer Guide
.. toctree::
:maxdepth: 2
+ contribute
gitwash/index
diff --git a/doc/source/reference/generators.rst b/doc/source/reference/generators.rst
index 2498ce10d..41e93ebce 100644
--- a/doc/source/reference/generators.rst
+++ b/doc/source/reference/generators.rst
@@ -31,9 +31,6 @@ Classic
cycle_graph
dorogovtsev_goltsev_mendes_graph
empty_graph
- grid_2d_graph
- grid_graph
- hypercube_graph
ladder_graph
lollipop_graph
null_graph
@@ -54,6 +51,19 @@ Expanders
chordal_cycle_graph
+Lattice
+-------
+.. automodule:: networkx.generators.lattice
+.. autosummary::
+ :toctree:generated/
+
+ grid_2d_graph
+ grid_graph
+ hexagonal_lattice_graph
+ hypercube_graph
+ triangular_lattice_graph
+
+
Small
-----
.. automodule:: networkx.generators.small
diff --git a/networkx/algorithms/dag.py b/networkx/algorithms/dag.py
index 00d5a30cc..381090391 100644
--- a/networkx/algorithms/dag.py
+++ b/networkx/algorithms/dag.py
@@ -1,5 +1,10 @@
# -*- coding: utf-8 -*-
-"""Algorithms for directed acyclic graphs (DAGs)."""
+"""Algorithms for directed acyclic graphs (DAGs).
+
+Note that most of these functions are only guaranteed to work for DAGs.
+In general, these functions do not check for acyclic-ness, so it is up
+to the user to check for that.
+"""
# Copyright (C) 2006-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
@@ -31,17 +36,18 @@ __all__ = ['descendants',
def descendants(G, source):
- """Return all nodes reachable from `source` in G.
+ """Return all nodes reachable from `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
- source : node in G
+ A directed acyclic graph (DAG)
+ source : node in `G`
Returns
-------
- des : set()
- The descendants of source in G
+ set()
+ The descendants of `source` in `G`
"""
if not G.has_node(source):
raise nx.NetworkXError("The node %s is not in the graph." % source)
@@ -50,16 +56,17 @@ def descendants(G, source):
def ancestors(G, source):
- """Return all nodes having a path to `source` in G.
+ """Return all nodes having a path to `source` in `G`.
Parameters
----------
G : NetworkX DiGraph
- source : node in G
+ A directed acyclic graph (DAG)
+ source : node in `G`
Returns
-------
- ancestors : set()
+ set()
The ancestors of source in G
"""
if not G.has_node(source):
@@ -69,18 +76,17 @@ def ancestors(G, source):
def is_directed_acyclic_graph(G):
- """Return True if the graph G is a directed acyclic graph (DAG) or
+ """Return True if the graph `G` is a directed acyclic graph (DAG) or
False if not.
Parameters
----------
G : NetworkX graph
- A graph
Returns
-------
- is_dag : bool
- True if G is a DAG, false otherwise
+ bool
+ True if `G` is a DAG, False otherwise
"""
if not G.is_directed():
return False
@@ -101,39 +107,39 @@ def topological_sort(G):
Parameters
----------
G : NetworkX digraph
- A directed graph
+ A directed acyclic graph (DAG)
Returns
-------
- topologically_sorted_nodes : iterable
+ iterable
An iterable of node names in topological sorted order.
Raises
------
NetworkXError
- Topological sort is defined for directed graphs only. If the graph G
+ Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
- If G is not a directed acyclic graph (DAG) no topological sort exists
- and a NetworkXUnfeasible exception is raised. This can also be
- raised if G is changed while the returned iterator is being processed.
+ If `G` is not a directed acyclic graph (DAG) no topological sort exists
+ and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
+ raised if `G` is changed while the returned iterator is being processed.
RuntimeError
- If G is changed while the returned iterator is being processed.
+ If `G` is changed while the returned iterator is being processed.
Examples
--------
- To get the reverse order of the topological sort::
+ To get the reverse order of the topological sort:
- >>> DG = nx.DiGraph([(1, 2), (2, 3)])
- >>> list(reversed(list(nx.topological_sort(DG))))
- [3, 2, 1]
+ >>> DG = nx.DiGraph([(1, 2), (2, 3)])
+ >>> list(reversed(list(nx.topological_sort(DG))))
+ [3, 2, 1]
Notes
-----
This algorithm is based on a description and proof in
- Introduction to algorithms - a creative approach [1]_ .
+ "Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
@@ -141,8 +147,8 @@ def topological_sort(G):
References
----------
- .. [1] Manber, U. (1989). Introduction to algorithms - a creative approach. Addison-Wesley.
- http://www.amazon.com/Introduction-Algorithms-A-Creative-Approach/dp/0201120372
+ .. [1] Manber, U. (1989). "Introduction to Algorithms: A Creative Approach."
+ Addison-Wesley.
"""
if not G.is_directed():
raise nx.NetworkXError(
@@ -183,7 +189,7 @@ def lexicographical_topological_sort(G, key=None):
Parameters
----------
G : NetworkX digraph
- A directed graph
+ A directed acyclic graph (DAG)
key : function, optional
This function maps nodes to keys with which to resolve ambiguities in
@@ -191,27 +197,27 @@ def lexicographical_topological_sort(G, key=None):
Returns
-------
- lexicographically_topologically_sorted_nodes : iterable
+ iterable
An iterable of node names in lexicographical topological sort order.
Raises
------
NetworkXError
- Topological sort is defined for directed graphs only. If the graph G
+ Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
- If G is not a directed acyclic graph (DAG) no topological sort exists
- and a NetworkXUnfeasible exception is raised. This can also be
- raised if G is changed while the returned iterator is being processed.
+ If `G` is not a directed acyclic graph (DAG) no topological sort exists
+ and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
+ raised if `G` is changed while the returned iterator is being processed.
RuntimeError
- If G is changed while the returned iterator is being processed.
+ If `G` is changed while the returned iterator is being processed.
Notes
-----
This algorithm is based on a description and proof in
- Introduction to algorithms - a creative approach [1]_ .
+ "Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
@@ -219,8 +225,8 @@ def lexicographical_topological_sort(G, key=None):
References
----------
- .. [1] Manber, U. (1989). Introduction to algorithms - a creative approach. Addison-Wesley.
- http://www.amazon.com/Introduction-Algorithms-A-Creative-Approach/dp/0201120372
+ .. [1] Manber, U. (1989). "Introduction to Algorithms: A Creative Approach."
+ Addison-Wesley.
"""
if not G.is_directed():
raise nx.NetworkXError(
@@ -259,7 +265,7 @@ def lexicographical_topological_sort(G, key=None):
def is_aperiodic(G):
- """Return True if G is aperiodic.
+ """Return True if `G` is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
@@ -267,28 +273,28 @@ def is_aperiodic(G):
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed graph
Returns
-------
- aperiodic : boolean
+ bool
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
- If G is not directed
+ If `G` is not directed
Notes
-----
This uses the method outlined in [1]_, which runs in O(m) time
- given m edges in G. Note that a graph is not aperiodic if it is
+ given m edges in `G`. Note that a graph is not aperiodic if it is
acyclic as every integer trivial divides length 0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
- Graph-theoretic analysis of finite Markov chains,
+ "Graph-theoretic analysis of finite Markov chains,"
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
"""
@@ -329,17 +335,17 @@ def transitive_closure(G):
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed graph
Returns
-------
- TC : NetworkX DiGraph
- Graph
+ NetworkX DiGraph
+ The transitive closure of `G`
Raises
------
NetworkXNotImplemented
- If G is not directed
+ If `G` is not directed
References
----------
@@ -366,40 +372,40 @@ def transitive_reduction(G):
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed acyclic graph (DAG)
Returns
-------
- TR : NetworkX DiGraph
- Graph
+ NetworkX DiGraph
+ The transitive reduction of `G`
Raises
------
NetworkXError
- If G is not a directed acyclic graph (DAG) transitive reduction is
- not uniquely defined and a NetworkXError exception is raised.
+ If `G` is not a directed acyclic graph (DAG) transitive reduction is
+ not uniquely defined and a :exc:`NetworkXError` exception is raised.
- References
+ References
----------
https://en.wikipedia.org/wiki/Transitive_reduction
- """
+ """
if not is_directed_acyclic_graph(G):
raise nx.NetworkXError(
- "Transitive reduction only uniquely defined on directed acyclic graphs.")
+ "Transitive reduction only uniquely defined on directed acyclic graphs.")
TR = nx.DiGraph()
TR.add_nodes_from(G.nodes())
for u in G:
u_edges = set(G[u])
for v in G[u]:
u_edges -= {y for x, y in nx.dfs_edges(G, v)}
- TR.add_edges_from((u,v) for v in u_edges)
+ TR.add_edges_from((u, v) for v in u_edges)
return TR
@not_implemented_for('undirected')
def antichains(G):
- """Generates antichains from a DAG.
+ """Generates antichains from a directed acyclic graph (DAG).
An antichain is a subset of a partially ordered set such that any
two elements in the subset are incomparable.
@@ -407,19 +413,19 @@ def antichains(G):
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed acyclic graph (DAG)
Returns
-------
- antichain : generator object
+ generator object
Raises
------
NetworkXNotImplemented
- If G is not directed
+ If `G` is not directed
NetworkXUnfeasible
- If G contains a cycle
+ If `G` contains a cycle
Notes
-----
@@ -452,38 +458,39 @@ def antichains(G):
@not_implemented_for('undirected')
def dag_longest_path(G, weight='weight', default_weight=1):
- """Returns the longest path in a DAG
- If G has edges with 'weight' attribute the edge data are used as weight values.
+ """Returns the longest path in a directed acyclic graph (DAG).
+
+ If `G` has edges with `weight` attribute the edge data are used as weight values.
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed acyclic graph (DAG)
- weight : string (default 'weight')
+ weight : str, optional
Edge data key to use for weight
- default_weight : integer (default 1)
+ default_weight : int, optional
The weight of edges that do not have a weight attribute
Returns
-------
- path : list
+ list
Longest path
Raises
------
NetworkXNotImplemented
- If G is not directed
+ If `G` is not directed
See also
--------
dag_longest_path_length
"""
- dist = {} # stores {v : (length, u)}
+ dist = {} # stores {v : (length, u)}
for v in nx.topological_sort(G):
us = [(dist[u][0] + data.get(weight, default_weight), u)
- for u, data in G.pred[v].items()]
+ for u, data in G.pred[v].items()]
# Use the best predecessor if there is one and its distance is non-negative, otherwise terminate.
maxu = max(us, key=lambda x: x[0]) if us else (0, v)
dist[v] = maxu if maxu[0] >= 0 else (0, v)
@@ -505,23 +512,23 @@ def dag_longest_path_length(G, weight='weight', default_weight=1):
Parameters
----------
G : NetworkX DiGraph
- Graph
+ A directed acyclic graph (DAG)
- weight : string (default 'weight')
+ weight : string, optional
Edge data key to use for weight
- default_weight : integer (default 1)
+ default_weight : int, optional
The weight of edges that do not have a weight attribute
Returns
-------
- path_length : int
+ int
Longest path length
Raises
------
NetworkXNotImplemented
- If G is not directed
+ If `G` is not directed
See also
--------
diff --git a/networkx/generators/__init__.py b/networkx/generators/__init__.py
index f253d9c72..41e1bea04 100644
--- a/networkx/generators/__init__.py
+++ b/networkx/generators/__init__.py
@@ -13,6 +13,7 @@ from networkx.generators.expanders import *
from networkx.generators.geometric import *
from networkx.generators.intersection import *
from networkx.generators.joint_degree_seq import *
+from networkx.generators.lattice import *
from networkx.generators.line import *
from networkx.generators.nonisomorphic_trees import *
from networkx.generators.random_clustered import *
diff --git a/networkx/generators/classic.py b/networkx/generators/classic.py
index fbdfd4995..8fe146f8e 100644
--- a/networkx/generators/classic.py
+++ b/networkx/generators/classic.py
@@ -1,29 +1,31 @@
-"""
-Generators for some classic graphs.
+# Copyright (C) 2004-2017 by
+# Aric Hagberg <[email protected]>
+# Dan Schult <[email protected]>
+# Pieter Swart <[email protected]>
+# All rights reserved.
+# BSD license.
+#
+# Authors: Aric Hagberg ([email protected])
+# Pieter Swart ([email protected])
+"""Generators for some classic graphs.
The typical graph generator is called as follows:
->>> G=nx.complete_graph(100)
+>>> G = nx.complete_graph(100)
returning the complete graph on n nodes labeled 0, .., 99
as a simple graph. Except for empty_graph, all the generators
in this module return a Graph class (i.e. a simple, undirected graph).
"""
-# Authors: Aric Hagberg ([email protected]) and Pieter Swart ([email protected])
-
-# Copyright (C) 2004-2016 by
-# Aric Hagberg <[email protected]>
-# Dan Schult <[email protected]>
-# Pieter Swart <[email protected]>
-# All rights reserved.
-# BSD license.
from __future__ import division
import itertools
import networkx as nx
from networkx.algorithms.bipartite.generators import complete_bipartite_graph
+from networkx.classes import Graph
+from networkx.exception import NetworkXError
from networkx.utils import accumulate
from networkx.utils import flatten
from networkx.utils import nodes_or_number
@@ -39,9 +41,6 @@ __all__ = ['balanced_tree',
'dorogovtsev_goltsev_mendes_graph',
'empty_graph',
'full_rary_tree',
- 'grid_graph',
- 'grid_2d_graph',
- 'hypercube_graph',
'ladder_graph',
'lollipop_graph',
'null_graph',
@@ -52,9 +51,9 @@ __all__ = ['balanced_tree',
'wheel_graph']
-#-------------------------------------------------------------------
+# -------------------------------------------------------------------
# Some Classic Graphs
-#-------------------------------------------------------------------
+# -------------------------------------------------------------------
def _tree_edges(n, r):
# helper function for trees
@@ -101,7 +100,7 @@ def full_rary_tree(r, n, create_using=None):
.. [1] An introduction to data structures and algorithms,
James Andrew Storer, Birkhauser Boston 2001, (page 225).
"""
- G = nx.empty_graph(n, create_using)
+ G = empty_graph(n, create_using)
G.add_edges_from(_tree_edges(n, r))
return G
@@ -165,8 +164,8 @@ def barbell_graph(m1, m2, create_using=None):
`m1, ..., m1+m2-1` for the path,
and `m1+m2, ..., 2*m1+m2-1` for the right barbell.
- The 3 subgraphs are joined via the edges `(m1-1, m1)` and
- `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete
+ The 3 subgraphs are joined via the edges `(m1-1, m1)` and
+ `(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete
graphs joined together.
This graph is an extremal example in David Aldous
@@ -174,12 +173,12 @@ def barbell_graph(m1, m2, create_using=None):
"""
if create_using is not None and create_using.is_directed():
- raise nx.NetworkXError("Directed Graph not supported")
+ raise NetworkXError("Directed Graph not supported")
if m1 < 2:
- raise nx.NetworkXError(
+ raise NetworkXError(
"Invalid graph description, m1 should be >=2")
if m2 < 0:
- raise nx.NetworkXError(
+ raise NetworkXError(
"Invalid graph description, m2 should be >=0")
# left barbell
@@ -220,7 +219,7 @@ def complete_graph(n, create_using=None):
9
>>> G.size()
36
- >>> G = nx.complete_graph(range(11,14))
+ >>> G = nx.complete_graph(range(11, 14))
>>> list(G.nodes())
[11, 12, 13]
>>> G = nx.complete_graph(4, nx.DiGraph())
@@ -277,9 +276,9 @@ def circulant_graph(n, offsets, create_using=None):
Examples
--------
- Many well-known graph families are subfamilies of the circulant graphs; for
- example, to generate the cycle graph on n points, we connect every vertex to
- every other at offset plus or minus one. For n = 10,
+ Many well-known graph families are subfamilies of the circulant graphs;
+ for example, to generate the cycle graph on n points, we connect every
+ vertex to every other at offset plus or minus one. For n = 10,
>>> import networkx
>>> G = networkx.generators.classic.circulant_graph(10, [1])
@@ -335,7 +334,7 @@ def cycle_graph(n, create_using=None):
n_orig, nodes = n
G = empty_graph(nodes, create_using)
G.name = "cycle_graph(%s)" % (n_orig,)
- G.add_edges_from(nx.utils.pairwise(nodes))
+ G.add_edges_from(pairwise(nodes))
G.add_edge(nodes[-1], nodes[0])
return G
@@ -349,9 +348,9 @@ def dorogovtsev_goltsev_mendes_graph(n, create_using=None):
"""
if create_using is not None:
if create_using.is_directed():
- raise nx.NetworkXError("Directed Graph not supported")
+ raise NetworkXError("Directed Graph not supported")
if create_using.is_multigraph():
- raise nx.NetworkXError("Multigraph not supported")
+ raise NetworkXError("Multigraph not supported")
G = empty_graph(0, create_using)
G.name = "Dorogovtsev-Goltsev-Mendes Graph"
G.add_edge(0, 1)
@@ -382,12 +381,12 @@ def empty_graph(n=0, create_using=None):
with the new graph. Usually used to set the type of the graph.
For example:
- >>> G=nx.empty_graph(10)
+ >>> G = nx.empty_graph(10)
>>> G.number_of_nodes()
10
>>> G.number_of_edges()
0
- >>> G=nx.empty_graph("ABC")
+ >>> G = nx.empty_graph("ABC")
>>> G.number_of_nodes()
3
>>> sorted(G)
@@ -405,8 +404,8 @@ def empty_graph(n=0, create_using=None):
Firstly, the variable create_using can be used to create an
empty digraph, multigraph, etc. For example,
- >>> n=10
- >>> G=nx.empty_graph(n, create_using=nx.DiGraph())
+ >>> n = 10
+ >>> G = nx.empty_graph(n, create_using=nx.DiGraph())
will create an empty digraph on n nodes.
@@ -421,7 +420,7 @@ def empty_graph(n=0, create_using=None):
"""
if create_using is None:
# default empty graph is a simple graph
- G = nx.Graph()
+ G = Graph()
else:
G = create_using
G.clear()
@@ -432,116 +431,17 @@ def empty_graph(n=0, create_using=None):
return G
-@nodes_or_number([0, 1])
-def grid_2d_graph(m, n, periodic=False, create_using=None):
- """ Return the 2d grid graph of mxn nodes
-
- The grid graph has each node connected to its four nearest neighbors.
-
- Parameters
- ==========
- m, n : int or iterable container of nodes (default = 0)
- If an integer, nodes are from `range(n)`.
- If a container, those become the coordinate of the node.
- periodic : bool (default = False)
- If True will connect boundary nodes in periodic fashion.
- create_using : Graph, optional (default Graph())
- If provided this graph is cleared of nodes and edges and filled
- with the new graph. Usually used to set the type of the graph.
- """
- G = empty_graph(0, create_using)
- row_name, rows = m
- col_name, columns = n
- G.name = "grid_2d_graph(%s, %s)" % (row_name, col_name)
- G.add_nodes_from((i, j) for i in rows for j in columns)
- G.add_edges_from(((i, j), (pi, j))
- for pi, i in pairwise(rows) for j in columns)
- G.add_edges_from(((i, j), (i, pj))
- for i in rows for pj, j in pairwise(columns))
- if G.is_directed():
- G.add_edges_from(((pi, j), (i, j))
- for pi, i in pairwise(rows) for j in columns)
- G.add_edges_from(((i, pj), (i, j))
- for i in rows for pj, j in pairwise(columns))
- if periodic:
- if len(columns) > 2:
- f = columns[0]
- l = columns[-1]
- G.add_edges_from(((i, f), (i, l)) for i in rows)
- if G.is_directed():
- G.add_edges_from(((i, l), (i, f)) for i in rows)
- if len(rows) > 2:
- f = rows[0]
- l = rows[-1]
- G.add_edges_from(((f, j), (l, j)) for j in columns)
- if G.is_directed():
- G.add_edges_from(((l, j), (f, j)) for j in columns)
- G.name = "periodic_grid_2d_graph(%s,%s)" % (m, n)
- return G
-
-
-def grid_graph(dim, periodic=False):
- """ Return the n-dimensional grid graph.
-
- 'dim' is a tuple or list with the size in each dimension or an
- iterable of nodes for each dimension. The dimension of
- the grid_graph is the length of the tuple or list 'dim'.
-
- E.g. G=grid_graph(dim=[2, 3]) produces a 2x3 grid graph.
-
- E.g. G=grid_graph(dim=[range(7, 9), range(3, 6)]) produces a 2x3 grid graph.
-
- If periodic=True then join grid edges with periodic boundary conditions.
-
- """
- dlabel = "%s" % str(dim)
- if not dim:
- G = empty_graph(0)
- G.name = "grid_graph(%s)" % dlabel
- return G
- if periodic:
- func = cycle_graph
- else:
- func = path_graph
-
- G = func(dim[0])
- for current_dim in dim[1:]:
- # order matters: copy before it is cleared during the creation of Gnew
- Gold = G.copy()
- Gnew = func(current_dim)
- # explicit: create_using=None
- # This is so that we get a new graph of Gnew's class.
- G = nx.cartesian_product(Gnew, Gold)
- # graph G is done but has labels of the form (1, (2, (3, 1)))
- # so relabel
- H = nx.relabel_nodes(G, flatten)
- H.name = "grid_graph(%s)" % dlabel
- return H
-
-
-def hypercube_graph(n):
- """Return the n-dimensional hypercube.
-
- Node labels are the integers 0 to 2**n - 1.
-
- """
- dim = n * [2]
- G = grid_graph(dim)
- G.name = "hypercube_graph_(%d)" % n
- return G
-
-
def ladder_graph(n, create_using=None):
"""Return the Ladder graph of length n.
- This is two rows of n nodes, with
+ This is two paths of n nodes, with
each pair connected by a single edge.
Node labels are the integers 0 to 2*n - 1.
"""
if create_using is not None and create_using.is_directed():
- raise nx.NetworkXError("Directed Graph not supported")
+ raise NetworkXError("Directed Graph not supported")
G = empty_graph(2 * n, create_using)
G.name = "ladder_graph_(%d)" % n
G.add_edges_from(pairwise(range(n)))
@@ -570,7 +470,7 @@ def lollipop_graph(m, n, create_using=None):
Notes
=====
- The 2 subgraphs are joined via an edge (m-1, m).
+ The 2 subgraphs are joined via an edge (m-1, m).
If n=0, this is merely a complete graph.
(This graph is an extremal example in David Aldous and Jim
@@ -584,12 +484,12 @@ def lollipop_graph(m, n, create_using=None):
if isinstance(m, int):
n_nodes = [len(m_nodes) + i for i in n_nodes]
if create_using is not None and create_using.is_directed():
- raise nx.NetworkXError("Directed Graph not supported")
+ raise NetworkXError("Directed Graph not supported")
if M < 2:
- raise nx.NetworkXError(
+ raise NetworkXError(
"Invalid graph description, m should be >=2")
if N < 0:
- raise nx.NetworkXError(
+ raise NetworkXError(
"Invalid graph description, n should be >=0")
# the ball
@@ -633,14 +533,14 @@ def path_graph(n, create_using=None):
n_name, nodes = n
G = empty_graph(nodes, create_using)
G.name = "path_graph(%s)" % (n_name,)
- G.add_edges_from(nx.utils.pairwise(nodes))
+ G.add_edges_from(pairwise(nodes))
return G
@nodes_or_number(0)
def star_graph(n, create_using=None):
""" Return the star graph
-
+
The star graph consists of one center node connected to n outer nodes.
Parameters
@@ -663,7 +563,7 @@ def star_graph(n, create_using=None):
first = nodes[0]
G = empty_graph(nodes, create_using)
if G.is_directed():
- raise nx.NetworkXError("Directed Graph not supported")
+ raise NetworkXError("Directed Graph not supported")
G.add_edges_from((first, v) for v in nodes[1:])
G.name = "star_graph(%s)" % (n_name,)
return G
@@ -704,7 +604,7 @@ def turan_graph(n, r):
"""
if not 1 <= r <= n:
- raise nx.NetworkXError("Must satisfy 1 <= r <= n")
+ raise NetworkXError("Must satisfy 1 <= r <= n")
partitions = [n//r]*(r-(n%r))+[n//r+1]*(n%r)
G = complete_multipartite_graph(*partitions)
@@ -715,7 +615,7 @@ def turan_graph(n, r):
@nodes_or_number(0)
def wheel_graph(n, create_using=None):
""" Return the wheel graph
-
+
The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.
Parameters
@@ -731,7 +631,7 @@ def wheel_graph(n, create_using=None):
"""
n_name, nodes = n
if n_name == 0:
- G = nx.empty_graph(0, create_using=create_using)
+ G = empty_graph(0, create_using=create_using)
G.name = "wheel_graph(0)"
return G
G = star_graph(nodes, create_using)
@@ -798,7 +698,7 @@ def complete_multipartite_graph(*subset_sizes):
complete_bipartite_graph
"""
# The complete multipartite graph is an undirected simple graph.
- G = nx.Graph()
+ G = Graph()
G.name = 'complete_multiparite_graph{}'.format(subset_sizes)
if len(subset_sizes) == 0:
@@ -817,7 +717,7 @@ def complete_multipartite_graph(*subset_sizes):
for (i, subset) in enumerate(subsets):
G.add_nodes_from(subset, subset=i)
except TypeError:
- raise nx.NetworkXError("Arguments must be all ints or all iterables")
+ raise NetworkXError("Arguments must be all ints or all iterables")
# Across subsets, all vertices should be adjacent.
# We can use itertools.combinations() because undirected.
diff --git a/networkx/generators/lattice.py b/networkx/generators/lattice.py
new file mode 100644
index 000000000..32e77a7ad
--- /dev/null
+++ b/networkx/generators/lattice.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2004-2017 by
+# Aric Hagberg <[email protected]>
+# Dan Schult <[email protected]>
+# Pieter Swart <[email protected]>
+# All rights reserved.
+# BSD license.
+#
+# Authors: Aric Hagberg ([email protected])
+# Pieter Swart ([email protected])
+# Joel Miller ([email protected])
+# Dan Schult ([email protected])
+"""Functions for generating grid graphs and lattices
+
+The :func:`grid_2d_graph`, :func:`triangular_lattice_graph`, and
+:func:`hexagonal_lattice_graph` functions correspond to the three
+`regular tilings of the plane`_, the square, triangular, and hexagonal
+tilings, respectively. :func:`grid_graph` and :func:`hypercube_graph`
+are similar for arbitrary dimensions. Useful relevent discussion can
+be found about `Triangular Tiling`_, and `Square, Hex and Triangle Grids`_
+
+.. _regular tilings of the plane: https://en.wikipedia.org/wiki/List_of_regular_polytopes_and_compounds#Euclidean_tilings
+.. _Square, Hex and Triangle Grids: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
+.. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling
+
+"""
+from __future__ import division
+
+from math import sqrt
+
+from networkx.classes import Graph
+from networkx.classes import set_node_attributes
+from networkx.algorithms.minors import contracted_nodes
+from networkx.algorithms.operators.product import cartesian_product
+from networkx.exception import NetworkXError
+from networkx.relabel import relabel_nodes
+from networkx.utils import flatten
+from networkx.utils import is_list_of_ints
+from networkx.utils import nodes_or_number
+from networkx.utils import pairwise
+from networkx.generators.classic import cycle_graph
+from networkx.generators.classic import empty_graph
+from networkx.generators.classic import path_graph
+
+__all__ = ['grid_2d_graph', 'grid_graph', 'hypercube_graph',
+ 'triangular_lattice_graph', 'hexagonal_lattice_graph']
+
+
+@nodes_or_number([0, 1])
+def grid_2d_graph(m, n, periodic=False, create_using=None):
+ """Returns the two-dimensional grid graph.
+
+ The grid graph has each node connected to its four nearest neighbors.
+
+ Parameters
+ ----------
+ m, n : int or iterable container of nodes
+ If an integer, nodes are from `range(n)`.
+ If a container, elements become the coordinate of the nodes.
+
+ periodic : bool (default: False)
+ If this is ``True`` the nodes on the grid boundaries are joined
+ to the corresponding nodes on the opposite grid boundaries.
+
+ create_using : NetworkX graph (default: Graph())
+ If provided this graph is cleared of nodes and edges and filled
+ with the new graph. Usually used to set the type of the graph.
+
+ Returns
+ -------
+ NetworkX graph
+ The (possibly periodic) grid graph of the specified dimensions.
+
+ """
+ G = empty_graph(0, create_using)
+ row_name, rows = m
+ col_name, cols = n
+ G.add_nodes_from((i, j) for i in rows for j in cols)
+ G.add_edges_from(((i, j), (pi, j))
+ for pi, i in pairwise(rows) for j in cols)
+ G.add_edges_from(((i, j), (i, pj))
+ for i in rows for pj, j in pairwise(cols))
+ if periodic is True:
+ if len(rows) > 2:
+ first = rows[0]
+ last = rows[-1]
+ G.add_edges_from(((first, j), (last, j)) for j in cols)
+ if len(cols) > 2:
+ first = cols[0]
+ last = cols[-1]
+ G.add_edges_from(((i, first), (i, last)) for i in rows)
+ # both directions for directed
+ if G.is_directed():
+ G.add_edges_from((v, u) for u, v in G.edges())
+
+ # set name
+ G.name = "grid_2d_graph(%s, %s)" % (row_name, col_name)
+ if periodic is True:
+ G.name = "periodic_" + G.name
+ return G
+
+
+def grid_graph(dim, periodic=False):
+ """Returns the *n*-dimensional grid graph.
+
+ The dimension *n* is the length of the list `dim` and the size in
+ each dimension is the value of the corresponding list element.
+
+ Parameters
+ ----------
+ dim : list or tuple of numbers or iterables of nodes
+ 'dim' is a tuple or list with, for each dimension, either a number
+ that is the size of that dimension or an iterable of nodes for
+ that dimension. The dimension of the grid_graph is the length
+ of `dim`.
+
+ periodic : bool
+ If `periodic is True` the nodes on the grid boundaries are joined
+ to the corresponding nodes on the opposite grid boundaries.
+
+ Returns
+ -------
+ NetworkX graph
+ The (possibly periodic) grid graph of the specified dimensions.
+
+ Examples
+ --------
+ To produce a 2 by 3 by 4 grid graph, a graph on 24 nodes::
+
+ >>> G = grid_graph(dim=[2, 3, 4])
+ >>> len(G)
+ 24
+ >>> G = grid_graph(dim=[range(7, 9), range(3, 6)])
+ >>> len(G)
+ 6
+ """
+ dlabel = "%s" % dim
+ if not dim:
+ G = empty_graph(0)
+ G.name = "grid_graph(%s)" % dlabel
+ return G
+
+ func = cycle_graph if periodic else path_graph
+ G = func(dim[0])
+ for current_dim in dim[1:]:
+ # order matters: copy before it is cleared during the creation of Gnew
+ Gold = G.copy()
+ Gnew = func(current_dim)
+ # explicit: create_using = None
+ # This is so that we get a new graph of Gnew's class.
+ G = cartesian_product(Gnew, Gold)
+ # graph G is done but has labels of the form (1, (2, (3, 1))) so relabel
+ H = relabel_nodes(G, flatten)
+ H.name = "grid_graph(%s)" % dlabel
+ return H
+
+
+def hypercube_graph(n):
+ """Returns the *n*-dimensional hypercube graph.
+
+ The nodes are the integers between 0 and ``2 ** n - 1``, inclusive.
+
+ For more information on the hypercube graph, see the Wikipedia
+ article *`Hypercube graph`_*.
+
+ .. _Hypercube graph: https://en.wikipedia.org/wiki/Hypercube_graph
+
+ Parameters
+ ----------
+ n : int
+ The dimension of the hypercube.
+ The number of nodes in the graph will be ``2 ** n``.
+
+ Returns
+ -------
+ NetworkX graph
+ The hypercube graph of dimension *n*.
+ """
+ dim = n * [2]
+ G = grid_graph(dim)
+ G.name = "hypercube_graph_(%d)" % n
+ return G
+
+
+def triangular_lattice_graph(m, n, periodic=False, with_positions=True,
+ create_using=None):
+ """Returns the *m* by *n* triangular lattice graph.
+
+ The *`triangular lattice graph`_* is a two-dimensional `grid graph`_ in
+ which each square unit has a diagonal edge (each grid unit has a chord).
+
+ The returned graph has `m` rows and `n` columns of triangles. Rows and
+ columns include both triangles pointing up and down. Rows form a strip
+ of constant height. Columns form a series of diamond shapes, staggered
+ with the columns on either side. Another way to state the size is that
+ the nodes form a grid of `m+1` rows and `(n + 1) // 2` columns.
+ The odd row nodes are shifted horizontally relative to the even rows.
+
+ Directed graph types have edges pointed up or right.
+
+ Positions of nodes are computed by default or `with_positions is True`.
+ The position of each node (embedded in a euclidean plane) is stored in
+ the graph using equilateral triangles with sidelength 1.
+ The height between rows of nodes is thus :math:`\sqrt(3)/2`.
+ Nodes lie in the first quadrant with the node `(0, 0)` at the origin.
+
+ .. _triangular lattice graph: http://mathworld.wolfram.com/TriangularGrid.html
+ .. _grid graph: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
+ .. _Triangular Tiling: https://en.wikipedia.org/wiki/Triangular_tiling
+
+ Parameters
+ ----------
+ m : int
+ The number of rows in the lattice.
+
+ n : int
+ The number of columns in the lattice.
+
+ periodic : bool (default: False)
+ If True, join the boundary vertices of the grid using periodic
+ boundary conditions. The join between boundaries is the final row
+ and column of triangles. This means there is one row and one column
+ fewer nodes for the periodic lattice. Periodic lattices require
+ `m >= 3`, `n >= 5` and are allowed but misaligned if `m` or `n` are odd
+
+ with_positions : bool (default: True)
+ Store the coordinates of each node in the graph node attribute 'pos'.
+ The coordinates provide a lattice with equilateral triangles.
+ Periodic positions shift the nodes vertically in a nonlinear way so
+ the edges don't overlap so much.
+
+ create_using : NetworkX graph
+ If specified, this must be an instance of a NetworkX graph
+ class. It will be cleared of nodes and edges and filled
+ with the new graph. Usually used to set the type of the graph.
+
+ Returns
+ -------
+ NetworkX graph
+ The *m* by *n* triangular lattice graph.
+ """
+ H = empty_graph(0, create_using)
+ if n == 0 or m == 0:
+ return H
+ if periodic:
+ if n < 5 or m < 3:
+ msg = "m > 2 and n > 4 required for periodic. m={}, n={}"
+ raise NetworkXError(msg.format(m, n))
+
+ N = (n + 1) // 2 # number of nodes in row
+ rows = range(m + 1)
+ cols = range(N + 1)
+ # Make grid
+ H.add_edges_from(((i, j), (i + 1, j)) for j in rows for i in cols[:N])
+ H.add_edges_from(((i, j), (i, j + 1)) for j in rows[:m] for i in cols)
+ # add diagonals
+ H.add_edges_from(((i, j), (i + 1, j + 1))
+ for j in rows[1:m:2] for i in cols[:N])
+ H.add_edges_from(((i + 1, j), (i, j + 1))
+ for j in rows[:m:2] for i in cols[:N])
+ # identify boundary nodes if periodic
+ if periodic is True:
+ for i in cols:
+ H = contracted_nodes(H, (i, 0), (i, m))
+ for j in rows[:m]:
+ H = contracted_nodes(H, (0, j), (N, j))
+ elif n % 2:
+ # remove extra nodes
+ H.remove_nodes_from(((N, j) for j in rows[1::2]))
+
+ # Add position node attributes
+ if with_positions:
+ ii = (i for i in cols for j in rows)
+ jj = (j for i in cols for j in rows)
+ xx = (0.5 * (j % 2) + i for i in cols for j in rows)
+ h = sqrt(3)/2
+ if periodic:
+ yy = (h * j + .01 * i * i for i in cols for j in rows)
+ else:
+ yy = (h * j for i in cols for j in rows)
+ pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy)
+ if (i, j) in H}
+ set_node_attributes(H, 'pos', pos)
+
+ # set the name
+ H.name = 'triangular_lattice_graph({}, {})'.format(m, n)
+ if periodic:
+ H.name = 'periodic_' + H.name
+ return H
+
+
+def hexagonal_lattice_graph(m, n, periodic=False, with_positions=True,
+ create_using=None):
+ """Returns an `m` by `n` hexagonal lattice graph.
+
+ The *hexagonal lattice graph* is a graph whose nodes and edges are
+ the `hexagonal tiling`_ of the plane.
+
+ The returned graph will have `m` rows and `n` columns of hexagons.
+ `Odd numbered columns`_ are shifted up relative to even numbered columns.
+
+ Positions of nodes are computed by default or `with_positions is True`.
+ Node positions creating the standard embedding in the plane
+ with sidelength 1 and are stored in the node attribute 'pos'.
+ `pos = nx.get_node_attributes(G, 'pos')` creates a dict ready for drawing.
+
+ .. _hexagonal tiling: https://en.wikipedia.org/wiki/Hexagonal_tiling
+ .. _Odd numbered columns: http://www-cs-students.stanford.edu/~amitp/game-programming/grids/
+
+ Parameters
+ ----------
+ m : int
+ The number of rows of hexagons in the lattice.
+
+ n : int
+ The number of columns of hexagons in the lattice.
+
+ periodic : bool
+ Whether to make a periodic grid by joining the boundary vertices.
+ For this to work `n` must be odd and both `n > 1` and `m > 1`.
+ The periodic connections create another row and column of hexagons
+ so these graphs have fewer nodes as boundary nodes are identified.
+
+ with_positions : bool (default: True)
+ Store the coordinates of each node in the graph node attribute 'pos'.
+ The coordinates provide a lattice with vertical columns of hexagons
+ offset to interleave and cover the plane.
+ Periodic positions shift the nodes vertically in a nonlinear way so
+ the edges don't overlap so much.
+
+ create_using : NetworkX graph
+ If specified, this must be an instance of a NetworkX graph
+ class. It will be cleared of nodes and edges and filled
+ with the new graph. Usually used to set the type of the graph.
+ If graph is directed, edges will point up or right.
+
+ Returns
+ -------
+ NetworkX graph
+ The *m* by *n* hexagonal lattice graph.
+ """
+ G = create_using if create_using is not None else Graph()
+ G.clear()
+ if m == 0 or n == 0:
+ return G
+ if periodic and (n % 2 == 1 or m < 2 or n < 2):
+ msg = "periodic hexagonal lattice needs m > 1, n > 1 and even n"
+ raise NetworkXError(msg)
+
+ M = 2 * m # twice as many nodes as hexagons vertically
+ rows = range(M + 2)
+ cols = range(n + 1)
+ # make lattice
+ col_edges = (((i, j), (i, j + 1)) for i in cols for j in rows[:M + 1])
+ row_edges = (((i, j), (i + 1, j)) for i in cols[:n] for j in rows
+ if i % 2 == j % 2)
+ G.add_edges_from(col_edges)
+ G.add_edges_from(row_edges)
+ # Remove corner nodes with one edge
+ G.remove_node((0, M + 1))
+ G.remove_node((n, (M + 1) * (n % 2)))
+
+ # identify boundary nodes if periodic
+ if periodic:
+ for i in cols[:n]:
+ G = contracted_nodes(G, (i, 0), (i, M))
+ for i in cols[1:]:
+ G = contracted_nodes(G, (i, 1), (i, M + 1))
+ for j in rows[1:M]:
+ G = contracted_nodes(G, (0, j), (n, j))
+ G.remove_node((n, M))
+
+ # calc position in embedded space
+ ii = (i for i in cols for j in rows)
+ jj = (j for i in cols for j in rows)
+ xx = (0.5 + i + i // 2 + (j % 2) * ((i % 2) - .5)
+ for i in cols for j in rows)
+ h = sqrt(3)/2
+ if periodic:
+ yy = (h * j + .01*i*i for i in cols for j in rows)
+ else:
+ yy = (h * j for i in cols for j in rows)
+ # exclude nodes not in G
+ pos = {(i, j): (x, y) for i, j, x, y in zip(ii, jj, xx, yy) if (i, j) in G}
+ set_node_attributes(G, 'pos', pos)
+
+ # set the name
+ G.name = 'hexagonal_lattice_graph({}, {})'.format(m, n)
+ if periodic:
+ G.name = 'periodic_' + G.name
+ return G
diff --git a/networkx/readwrite/gml.py b/networkx/readwrite/gml.py
index af8db1d00..22c0afdf0 100644
--- a/networkx/readwrite/gml.py
+++ b/networkx/readwrite/gml.py
@@ -423,7 +423,9 @@ def parse_gml_lines(lines, label, destringizer):
G.add_edge(source, target, **edge)
else:
raise nx.NetworkXError(
- 'edge #%d (%r%s%r) is duplicated' %
+ """edge #%d (%r%s%r) is duplicated
+
+Hint: If this is a multigraph, add "multigraph 1" to the header of the file.""" %
(i, source, '->' if directed else '--', target))
else:
key = edge.pop('key', None)
@@ -565,7 +567,7 @@ def generate_gml(G, stringizer=None):
Notes
-----
Graph attributes named 'directed', 'multigraph', 'node' or
- 'edge',node attributes named 'id' or 'label', edge attributes
+ 'edge', node attributes named 'id' or 'label', edge attributes
named 'source' or 'target' (or 'key' if `G` is a multigraph)
are ignored because these attribute names are used to encode the graph
structure.
@@ -581,7 +583,10 @@ def generate_gml(G, stringizer=None):
key = str(key)
if key not in ignored_keys:
if isinstance(value, (int, long)):
- yield indent + key + ' ' + str(value)
+ if key == 'label':
+ yield indent + key + ' "' + str(value) + '"'
+ else:
+ yield indent + key + ' ' + str(value)
elif isinstance(value, float):
text = repr(value).upper()
# GML requires that a real literal contain a decimal point, but
@@ -590,7 +595,10 @@ def generate_gml(G, stringizer=None):
epos = text.rfind('E')
if epos != -1 and text.find('.', 0, epos) == -1:
text = text[:epos] + '.' + text[epos:]
- yield indent + key + ' ' + text
+ if key == 'label':
+                yield indent + key + ' "' + text + '"'
+ else:
+ yield indent + key + ' ' + text
elif isinstance(value, dict):
yield indent + key + ' ['
next_indent = indent + ' '
| label in gml output should be quoted
The output of the `write_gml` function currently (1.11) does not quote the label values, but I do think it should according to the [GML spec](http://www.fim.uni-passau.de/fileadmin/files/lehrstuhl/brandenburg/projekte/gml/gml-technical-report.pdf). Because of this, tools like Cytoscape, jhive and gml2gv fail to read the gml file.
A node from my output:
```
node [
id 0
label 1203
]
```
that should be
```
node [
id 0
label "1203"
]
``` | networkx/networkx | diff --git a/networkx/generators/tests/test_classic.py b/networkx/generators/tests/test_classic.py
index 72a38ead9..d7cb02721 100644
--- a/networkx/generators/tests/test_classic.py
+++ b/networkx/generators/tests/test_classic.py
@@ -257,82 +257,6 @@ class TestGeneratorClassic():
assert_equal(G.name, 'empty_graph(42)')
assert_true(isinstance(G,Graph))
- def test_grid_2d_graph(self):
- n=5;m=6
- G=grid_2d_graph(n,m)
- assert_equal(number_of_nodes(G), n*m)
- assert_equal(degree_histogram(G), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
- DG=grid_2d_graph(n,m, create_using=DiGraph())
- assert_equal(DG.succ, G.adj)
- assert_equal(DG.pred, G.adj)
- MG=grid_2d_graph(n,m, create_using=MultiGraph())
- assert_edges_equal(MG.edges(), G.edges())
- g=grid_2d_graph(range(n), range(m))
- assert_edges_equal(g.edges(), G.edges())
-
- def test_grid_graph(self):
- """grid_graph([n,m]) is a connected simple graph with the
- following properties:
- number_of_nodes=n*m
- degree_histogram=[0,0,4,2*(n+m)-8,(n-2)*(m-2)]
- """
- for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]:
- dim=[n,m]
- g=grid_graph(dim)
- assert_equal(number_of_nodes(g), n*m)
- assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
- assert_equal(dim,[n,m])
-
- for n, m in [(1, 5), (5, 1)]:
- dim=[n,m]
- g=grid_graph(dim)
- assert_equal(number_of_nodes(g), n*m)
- assert_true(is_isomorphic(g,path_graph(5)))
- assert_equal(dim,[n,m])
-
-# mg=grid_graph([n,m], create_using=MultiGraph())
-# assert_equal(mg.edges(), g.edges())
-
- g=grid_graph([range(7,9), range(3,6)])
- assert_equal(number_of_nodes(g), 2*3)
- assert_true(is_isomorphic(g, grid_graph([2,3])))
-
- """Tuple dim arguments of the above tests
- """
- for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]:
- dim=(n,m)
- g=grid_graph(dim)
- assert_equal(number_of_nodes(g), n*m)
- assert_equal(degree_histogram(g), [0,0,4,2*(n+m)-8,(n-2)*(m-2)])
- assert_equal(dim,(n,m))
-
- for n, m in [(1, 5), (5, 1)]:
- dim=(n,m)
- g=grid_graph(dim)
- assert_equal(number_of_nodes(g), n*m)
- assert_true(is_isomorphic(g,path_graph(5)))
- assert_equal(dim,(n,m))
-
- g=grid_graph((range(7,9), range(3,6)))
- assert_equal(number_of_nodes(g), 2*3)
- assert_true(is_isomorphic(g, grid_graph((2,3))))
-
- def test_hypercube_graph(self):
- for n, G in [(0, null_graph()), (1, path_graph(2)),
- (2, cycle_graph(4)), (3, cubical_graph())]:
- g=hypercube_graph(n)
- assert_true(is_isomorphic(g, G))
-
- g=hypercube_graph(4)
- assert_equal(degree_histogram(g), [0, 0, 0, 0, 16])
- g=hypercube_graph(5)
- assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 32])
- g=hypercube_graph(6)
- assert_equal(degree_histogram(g), [0, 0, 0, 0, 0, 0, 64])
-
-# mg=hypercube_graph(6, create_using=MultiGraph())
-# assert_equal(mg.edges(), g.edges())
-
def test_ladder_graph(self):
for i, G in [(0, empty_graph(0)), (1, path_graph(2)),
(2, hypercube_graph(2)), (10, grid_graph([2,10]))]:
@@ -411,28 +335,6 @@ class TestGeneratorClassic():
assert_equal(g.size(), 2)
assert_true(g.is_directed())
- def test_periodic_grid_2d_graph(self):
- g=grid_2d_graph(0,0, periodic=True)
- assert_equal(dict(g.degree()), {})
-
- for m, n, G in [(2, 2, cycle_graph(4)), (1, 7, cycle_graph(7)),
- (7, 1, cycle_graph(7)), (2, 5, circular_ladder_graph(5)),
- (5, 2, circular_ladder_graph(5)), (2, 4, cubical_graph()),
- (4, 2, cubical_graph())]:
- g=grid_2d_graph(m,n, periodic=True)
- assert_true(is_isomorphic(g, G))
-
- DG=grid_2d_graph(4, 2, periodic=True, create_using=DiGraph())
- assert_equal(DG.succ,g.adj)
- assert_equal(DG.pred,g.adj)
- MG=grid_2d_graph(4, 2, periodic=True, create_using=MultiGraph())
- assert_edges_equal(MG.edges(), g.edges())
-
- gg=grid_2d_graph(range(4), range(2), periodic=True)
- assert_true(is_isomorphic(gg, g))
- ggg=grid_2d_graph("abcd", "ef", periodic=True)
- assert_true(is_isomorphic(ggg, g))
-
def test_star_graph(self):
assert_true(is_isomorphic(star_graph(0), empty_graph(1)))
assert_true(is_isomorphic(star_graph(1), path_graph(2)))
@@ -520,4 +422,4 @@ class TestGeneratorClassic():
for (block1, block2) in itertools.combinations(blocks, 2):
for u, v in itertools.product(block1, block2):
assert_true(v in G[u])
- assert_not_equal(G.node[u], G.node[v])
+ assert_not_equal(G.node[u], G.node[v])
\ No newline at end of file
diff --git a/networkx/generators/tests/test_lattice.py b/networkx/generators/tests/test_lattice.py
new file mode 100644
index 000000000..dab3cc4ee
--- /dev/null
+++ b/networkx/generators/tests/test_lattice.py
@@ -0,0 +1,209 @@
+"""Unit tests for the :mod:`networkx.generators.lattice` module."""
+import itertools
+
+from nose.tools import assert_equal
+from nose.tools import assert_true
+from nose.tools import assert_raises
+
+import networkx as nx
+from networkx.testing import assert_edges_equal
+
+
+class TestGrid2DGraph:
+ """Unit tests for :func:`networkx.generators.lattice.grid_2d_graph`"""
+ def test_number_of_vertices(self):
+ m, n = 5, 6
+ G = nx.grid_2d_graph(m, n)
+ assert_equal(len(G), m * n)
+
+ def test_degree_distribution(self):
+ m, n = 5, 6
+ G = nx.grid_2d_graph(m, n)
+ expected_histogram = [0, 0, 4, 2 * (m + n) - 8, (m - 2) * (n - 2)]
+ assert_equal(nx.degree_histogram(G), expected_histogram)
+
+ def test_directed(self):
+ m, n = 5, 6
+ G = nx.grid_2d_graph(m, n)
+ H = nx.grid_2d_graph(m, n, create_using=nx.DiGraph())
+ assert_equal(H.succ, G.adj)
+ assert_equal(H.pred, G.adj)
+
+ def test_multigraph(self):
+ m, n = 5, 6
+ G = nx.grid_2d_graph(m, n)
+ H = nx.grid_2d_graph(m, n, create_using=nx.MultiGraph())
+ assert_equal(list(H.edges()), list(G.edges()))
+
+ def test_periodic(self):
+ G = nx.grid_2d_graph(0, 0, periodic=True)
+ assert_equal(dict(G.degree()), {})
+
+ for m, n, H in [(2, 2, nx.cycle_graph(4)), (1, 7, nx.cycle_graph(7)),
+ (7, 1, nx.cycle_graph(7)),
+ (2, 5, nx.circular_ladder_graph(5)),
+ (5, 2, nx.circular_ladder_graph(5)),
+ (2, 4, nx.cubical_graph()),
+ (4, 2, nx.cubical_graph())]:
+ G = nx.grid_2d_graph(m, n, periodic=True)
+ assert_true(nx.could_be_isomorphic(G, H))
+
+ def test_periodic_directed(self):
+ G = nx.grid_2d_graph(4, 2, periodic=True)
+ H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.DiGraph())
+ assert_equal(H.succ, G.adj)
+ assert_equal(H.pred, G.adj)
+
+ def test_periodic_multigraph(self):
+ G = nx.grid_2d_graph(4, 2, periodic=True)
+ H = nx.grid_2d_graph(4, 2, periodic=True, create_using=nx.MultiGraph())
+ assert_equal(list(G.edges()), list(H.edges()))
+
+ def test_node_input(self):
+ G = nx.grid_2d_graph(4, 2, periodic=True)
+ H = nx.grid_2d_graph(range(4), range(2), periodic=True)
+ assert_true(nx.is_isomorphic(H, G))
+ H = nx.grid_2d_graph("abcd", "ef", periodic=True)
+ assert_true(nx.is_isomorphic(H, G))
+ G = nx.grid_2d_graph(5, 6)
+ H = nx.grid_2d_graph(range(5), range(6))
+ assert_edges_equal(H, G)
+
+
+class TestGridGraph:
+ """Unit tests for :func:`networkx.generators.lattice.grid_graph`"""
+ def test_grid_graph(self):
+ """grid_graph([n,m]) is a connected simple graph with the
+ following properties:
+ number_of_nodes = n*m
+ degree_histogram = [0,0,4,2*(n+m)-8,(n-2)*(m-2)]
+ """
+ for n, m in [(3, 5), (5, 3), (4, 5), (5, 4)]:
+ dim = [n, m]
+ g = nx.grid_graph(dim)
+ assert_equal(len(g), n*m)
+ assert_equal(nx.degree_histogram(g), [0, 0, 4, 2 * (n + m) - 8,
+ (n - 2) * (m - 2)])
+
+ for n, m in [(1, 5), (5, 1)]:
+ dim = [n, m]
+ g = nx.grid_graph(dim)
+ assert_equal(len(g), n*m)
+ assert_true(nx.is_isomorphic(g, nx.path_graph(5)))
+
+# mg = nx.grid_graph([n,m], create_using=MultiGraph())
+# assert_equal(mg.edges(), g.edges())
+
+ def test_node_input(self):
+ G = nx.grid_graph([range(7, 9), range(3, 6)])
+ assert_equal(len(G), 2 * 3)
+ assert_true(nx.is_isomorphic(G, nx.grid_graph([2, 3])))
+
+
+class TestHypercubeGraph:
+ """Unit tests for :func:`networkx.generators.lattice.hypercube_graph`"""
+ def test_special_cases(self):
+ for n, H in [(0, nx.null_graph()), (1, nx.path_graph(2)),
+ (2, nx.cycle_graph(4)), (3, nx.cubical_graph())]:
+ G = nx.hypercube_graph(n)
+ assert_true(nx.could_be_isomorphic(G, H))
+
+ def test_degree_distribution(self):
+ for n in range(1, 10):
+ G = nx.hypercube_graph(n)
+ expected_histogram = [0] * n + [2 ** n]
+ assert_equal(nx.degree_histogram(G), expected_histogram)
+
+
+class TestTriangularLatticeGraph:
+ "Tests for :func:`networkx.generators.lattice.triangular_lattice_graph`"
+ def test_lattice_points(self):
+ """Tests that the graph is really a triangular lattice."""
+ for m, n in [(2, 3), (2, 2), (2, 1), (3, 3), (3, 2), (3, 4)]:
+ G = nx.triangular_lattice_graph(m, n)
+ N = (n + 1) // 2
+ assert_equal(len(G), (m + 1) * (1 + N) - (n % 2) * ((m + 1) // 2))
+ for (i, j) in G.nodes():
+ nbrs = G[(i, j)]
+ if i < N:
+ assert_true((i + 1, j) in nbrs)
+ if j < m:
+ assert_true((i, j + 1) in nbrs)
+ if j < m and (i > 0 or j % 2) and (i < N or (j + 1) % 2):
+ assert_true((i + 1, j + 1) in nbrs or (i - 1, j + 1) in nbrs)
+
+ def test_directed(self):
+ """Tests for creating a directed triangular lattice."""
+ G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph())
+ H = nx.triangular_lattice_graph(3, 4, create_using=nx.DiGraph())
+ assert_true(H.is_directed())
+ for u, v in H.edges():
+ assert_true(v[1] >= u[1])
+ if v[1] == u[1]:
+ assert_true(v[0] > u[0])
+
+ def test_multigraph(self):
+ """Tests for creating a triangular lattice multigraph."""
+ G = nx.triangular_lattice_graph(3, 4, create_using=nx.Graph())
+ H = nx.triangular_lattice_graph(3, 4, create_using=nx.MultiGraph())
+ assert_equal(list(H.edges()), list(G.edges()))
+
+ def test_periodic(self):
+ G = nx.triangular_lattice_graph(4, 6, periodic=True)
+ assert_equal(len(G), 12)
+ assert_equal(G.size(), 36)
+ # all degrees are 6
+ assert_equal(len([n for n, d in G.degree() if d != 6]), 0)
+ G = nx.triangular_lattice_graph(5, 7, periodic=True)
+ TLG = nx.triangular_lattice_graph
+ assert_raises(nx.NetworkXError, TLG, 2, 4, periodic=True)
+ assert_raises(nx.NetworkXError, TLG, 4, 4, periodic=True)
+ assert_raises(nx.NetworkXError, TLG, 2, 6, periodic=True)
+
+
+class TestHexagonalLatticeGraph:
+ "Tests for :func:`networkx.generators.lattice.hexagonal_lattice_graph`"
+ def test_lattice_points(self):
+ """Tests that the graph is really a hexagonal lattice."""
+ for m, n in [(4, 5), (4, 4), (4, 3), (3, 2), (3, 3), (3, 5)]:
+ G = nx.hexagonal_lattice_graph(m, n)
+ assert_equal(len(G), 2 * (m + 1) * (n + 1) - 2)
+ C_6 = nx.cycle_graph(6)
+ hexagons = [
+ [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)],
+ [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)],
+ [(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3)],
+ [(2, 0), (2, 1), (2, 2), (3, 0), (3, 1), (3, 2)],
+ [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4)],
+ ]
+ for hexagon in hexagons:
+ assert_true(nx.is_isomorphic(G.subgraph(hexagon), C_6))
+
+ def test_directed(self):
+ """Tests for creating a directed hexagonal lattice."""
+ G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph())
+ H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.DiGraph())
+ assert_true(H.is_directed())
+ pos = nx.get_node_attributes(H, 'pos')
+ for u, v in H.edges():
+ assert_true(pos[v][1] >= pos[u][1])
+ if pos[v][1] == pos[u][1]:
+ assert_true(pos[v][0] > pos[u][0])
+
+ def test_multigraph(self):
+ """Tests for creating a hexagonal lattice multigraph."""
+ G = nx.hexagonal_lattice_graph(3, 5, create_using=nx.Graph())
+ H = nx.hexagonal_lattice_graph(3, 5, create_using=nx.MultiGraph())
+ assert_equal(list(H.edges()), list(G.edges()))
+
+ def test_periodic(self):
+ G = nx.hexagonal_lattice_graph(4, 6, periodic=True)
+ assert_equal(len(G), 48)
+ assert_equal(G.size(), 72)
+ # all degrees are 3
+ assert_equal(len([n for n, d in G.degree() if d != 3]), 0)
+ G = nx.hexagonal_lattice_graph(5, 8, periodic=True)
+ HLG = nx.hexagonal_lattice_graph
+ assert_raises(nx.NetworkXError, HLG, 2, 7, periodic=True)
+ assert_raises(nx.NetworkXError, HLG, 1, 4, periodic=True)
+ assert_raises(nx.NetworkXError, HLG, 2, 1, periodic=True)
diff --git a/networkx/readwrite/tests/test_gml.py b/networkx/readwrite/tests/test_gml.py
index cd9d61b97..42052d7d7 100644
--- a/networkx/readwrite/tests/test_gml.py
+++ b/networkx/readwrite/tests/test_gml.py
@@ -180,6 +180,19 @@ graph [
os.close(fd)
os.unlink(fname)
+ def test_labels_are_strings(self):
+ # GML requires labels to be strings (i.e., in quotes)
+ answer = """graph [
+ node [
+ id 0
+ label "1203"
+ ]
+]"""
+ G = nx.Graph()
+ G.add_node(1203)
+ data = '\n'.join(nx.generate_gml(G, stringizer=literal_stringizer))
+ assert_equal(data, answer)
+
def test_relabel_duplicate(self):
data = """
graph
@@ -240,12 +253,29 @@ graph
name "path_graph(1)"
node [
id 0
- label 0
+ label "0"
demo "This is "quoted" and this is a copyright: ©"
]
]"""
assert_equal(data, answer)
+ def test_unicode_node(self):
+ node = 'node' + unichr(169)
+ G = nx.Graph()
+ G.add_node(node)
+ fobj = tempfile.NamedTemporaryFile()
+ nx.write_gml(G, fobj)
+ fobj.seek(0)
+ # Should be bytes in 2.x and 3.x
+ data = fobj.read().strip().decode('ascii')
+ answer = """graph [
+ node [
+ id 0
+ label "node©"
+ ]
+]"""
+ assert_equal(data, answer)
+
def test_name(self):
G = nx.parse_gml('graph [ name "x" node [ id 0 label "x" ] ]')
assert_equal('x', G.graph['name'])
@@ -261,7 +291,7 @@ graph
gml += ' directed ' + str(int(directed))
if multigraph is not None:
gml += ' multigraph ' + str(int(multigraph))
- gml += ' node [ id 0 label 0 ]'
+ gml += ' node [ id 0 label "0" ]'
gml += ' edge [ source 0 target 0 ]'
gml += ' ]'
G = nx.parse_gml(gml)
@@ -274,7 +304,7 @@ graph
gml += ' multigraph 1\n'
gml += """ node [
id 0
- label 0
+ label "0"
]
edge [
source 0
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 8
} | help | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev graphviz"
],
"python": "3.6",
"reqs_path": [
"requirements/default.txt",
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
decorator==5.1.1
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/networkx/networkx.git@26e6efcb88e132ff76130531ca7e88583f4af9e2#egg=networkx
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- decorator==5.1.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/networkx
| [
"networkx/generators/tests/test_lattice.py::TestTriangularLatticeGraph::test_lattice_points",
"networkx/generators/tests/test_lattice.py::TestTriangularLatticeGraph::test_directed",
"networkx/generators/tests/test_lattice.py::TestTriangularLatticeGraph::test_multigraph",
"networkx/generators/tests/test_lattice.py::TestTriangularLatticeGraph::test_periodic",
"networkx/generators/tests/test_lattice.py::TestHexagonalLatticeGraph::test_lattice_points",
"networkx/generators/tests/test_lattice.py::TestHexagonalLatticeGraph::test_directed",
"networkx/generators/tests/test_lattice.py::TestHexagonalLatticeGraph::test_multigraph",
"networkx/generators/tests/test_lattice.py::TestHexagonalLatticeGraph::test_periodic",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_labels_are_strings",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_quotes"
]
| [
"networkx/generators/tests/test_classic.py::test",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_parse_gml",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_read_gml"
]
| [
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_balanced_tree",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_balanced_tree_star",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_balanced_tree_path",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_full_rary_tree",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_full_rary_tree_balanced",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_full_rary_tree_path",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_full_rary_tree_empty",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_full_rary_tree_3_20",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_barbell_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_digraph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_circular_ladder_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_circulant_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_cycle_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_dorogovtsev_goltsev_mendes_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_empty_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_ladder_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_lollipop_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_null_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_path_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_star_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_trivial_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_turan_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_wheel_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_0_partite_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_1_partite_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_2_partite_graph",
"networkx/generators/tests/test_classic.py::TestGeneratorClassic::test_complete_multipartite_graph",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_number_of_vertices",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_degree_distribution",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_directed",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_multigraph",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_periodic",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_periodic_directed",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_periodic_multigraph",
"networkx/generators/tests/test_lattice.py::TestGrid2DGraph::test_node_input",
"networkx/generators/tests/test_lattice.py::TestGridGraph::test_grid_graph",
"networkx/generators/tests/test_lattice.py::TestGridGraph::test_node_input",
"networkx/generators/tests/test_lattice.py::TestHypercubeGraph::test_special_cases",
"networkx/generators/tests/test_lattice.py::TestHypercubeGraph::test_degree_distribution",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_parse_gml_cytoscape_bug",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_relabel_duplicate",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_tuplelabels",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_unicode_node",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_name",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_graph_types",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_data_types",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_escape_unescape",
"networkx/readwrite/tests/test_gml.py::TestGraph::test_exceptions"
]
| []
| BSD 3-Clause | 1,440 | [
"INSTALL.txt",
"doc/source/reference/generators.rst",
"networkx/algorithms/dag.py",
"networkx/generators/lattice.py",
"networkx/readwrite/gml.py",
"CONTRIBUTE.rst",
"networkx/generators/__init__.py",
"doc/source/developer/index.rst",
"appveyor.yml",
"doc/source/developer/contribute.rst",
"networkx/generators/classic.py"
]
| [
"INSTALL.txt",
"doc/source/reference/generators.rst",
"networkx/algorithms/dag.py",
"networkx/generators/lattice.py",
"networkx/readwrite/gml.py",
"CONTRIBUTE.rst",
"networkx/generators/__init__.py",
"doc/source/developer/index.rst",
"appveyor.yml",
"doc/source/developer/contribute.rst",
"networkx/generators/classic.py"
]
|
projectmesa__mesa-393 | 5a3a62c6dd9dcb3b310a5526d37dc4c7bcfc43c0 | 2017-07-07 11:48:27 | 246c69d592a82e89c75d555c79736c3f619d434a | diff --git a/docs/tutorials/intro_tutorial.ipynb b/docs/tutorials/intro_tutorial.ipynb
index 63b79978..3127a7fd 100644
--- a/docs/tutorials/intro_tutorial.ipynb
+++ b/docs/tutorials/intro_tutorial.ipynb
@@ -183,9 +183,7 @@
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"empty_model = MoneyModel(10)\n",
@@ -235,9 +233,7 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"class MoneyAgent(Agent):\n",
@@ -294,15 +290,13 @@
{
"cell_type": "code",
"execution_count": 7,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "(array([ 4., 0., 0., 0., 0., 2., 0., 0., 0., 4.]),\n",
- " array([ 0. , 0.2, 0.4, 0.6, 0.8, 1. , 1.2, 1.4, 1.6, 1.8, 2. ]),\n",
+ "(array([ 4., 0., 5., 0., 0., 0., 0., 0., 0., 1.]),\n",
+ " array([ 0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5, 5. ]),\n",
" <a list of 10 Patch objects>)"
]
},
@@ -312,9 +306,9 @@
},
{
"data": {
- "image/png": "<base64 PNG data omitted (rendered matplotlib histogram)>",
+ "image/png": "<base64 PNG data omitted (rendered matplotlib histogram)>",
"text/plain": [
- "<matplotlib.figure.Figure at 0x106cd7b38>"
+ "<matplotlib.figure.Figure at 0x110782ba8>"
]
},
"metadata": {},
@@ -350,16 +344,14 @@
{
"cell_type": "code",
"execution_count": 8,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "(array([ 437., 303., 144., 75., 28., 9., 4.]),\n",
- " array([0, 1, 2, 3, 4, 5, 6, 7]),\n",
- " <a list of 7 Patch objects>)"
+ "(array([ 429., 306., 160., 63., 26., 16.]),\n",
+ " array([0, 1, 2, 3, 4, 5, 6]),\n",
+ " <a list of 6 Patch objects>)"
]
},
"execution_count": 8,
@@ -368,9 +360,9 @@
},
{
"data": {
- "image/png": "<base64 PNG data omitted (rendered matplotlib histogram)>",
+ "image/png": "<base64 PNG data omitted (rendered matplotlib histogram)>",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10cee86d8>"
+ "<matplotlib.figure.Figure at 0x11901ae80>"
]
},
"metadata": {},
@@ -602,14 +594,12 @@
{
"cell_type": "code",
"execution_count": 13,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.colorbar.Colorbar at 0x10fb446a0>"
+ "<matplotlib.colorbar.Colorbar at 0x119326b00>"
]
},
"execution_count": 13,
@@ -618,9 +608,9 @@
},
{
"data": {
- "image/png": "<base64 PNG data omitted (rendered grid plot with colorbar)>",
+ "image/png": "<base64 PNG data omitted (rendered grid plot with colorbar)>",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10fa4b860>"
+ "<matplotlib.figure.Figure at 0x1191a07b8>"
]
},
"metadata": {},
@@ -737,9 +727,7 @@
{
"cell_type": "code",
"execution_count": 15,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"model = MoneyModel(50, 10, 10)\n",
@@ -757,14 +745,12 @@
{
"cell_type": "code",
"execution_count": 16,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10fa4b278>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x1192d1518>"
]
},
"execution_count": 16,
@@ -773,9 +759,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXEAAAEACAYAAABF+UbAAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xmc1fP+B/DXu41SIlEqTSil3CK0iGsQxtYipbiKixK5\nbn6Xyg/1u2Qvy+Wiy5W9KCpXiMshbUabpGlTmVal0qpmef/+eJ8xZ2bO8j1nvmf5nvN6Ph7n0fl+\nz+d8vp/5NvM+n/NZRVVBRETeVCXZBSAiotgxiBMReRiDOBGRhzGIExF5GIM4EZGHMYgTEXmYoyAu\nIjkikiciK0RkWJDXDxeRaSKySESWiMj1rpeUiIgqkEjjxEWkCoAVAC4AsBFALoC+qpoXkGYEgMNV\ndYSI1AewHEADVS2MW8mJiMhRTbwDgJWquk5VCwBMANC9XBoFUMf/vA6AXxjAiYjiz0kQbwwgP+B4\nvf9coGcBtBaRjQAWA7jDneIREVE4bnVsXgxgoao2AnAagOdEpLZLeRMRUQjVHKTZAKBpwHET/7lA\nNwB4GABUdbWIrAHQCsC3gYlEhAu1EBHFQFUl2HknNfFcAM1FJEtEagDoC2BauTTrAHQFABFpAOAk\nAD+GKAgf/sfIkSOTXoZUefBe8H7wfoR+hBOxJq6qRSIyBMAMWNB/WVWXicgge1nHAXgQwHgR+c7/\ntrtVdXukvImIqHKcNKdAVT8G0LLcuRcDnm+CtYsTEVECccZmEmVnZye7CCmD96Is3o+yeD9CizjZ\nx9WLiWgir0dElA5EBBqiY9NRc0q8NWvWDOvWrUt2MVJCVlYW1q5dm+xiEJFHpERN3P8pk7BypDLe\nCyIqL1xNnG3iREQexiBORORhDOJERB7GIO6CwYMHY/To0a6nJSKKhB2bDk2YMAFPPfUUvv/+e9Su\nXRvHH388+vfvj8GDB7t6HS/cCyJKLHZsVtKYMWMwdOhQDBs2DFu2bMHmzZvxwgsvYPbs2SgoKEh2\n8YgogzGIR7Br1y6MHDkSzz//PHr27InDDjsMANCuXTu8/vrrqF69Om644Qbcf//9AIAvv/wSxx13\nHMaOHYsGDRqgcePGGD9+/O/5BaYlIqosBvEI5syZg4MHD6Jbt26O37N582bs3r0bGzduxEsvvYTb\nbrsNv/76axxLSUSZyjNBXKTyj1hs27YN9evXR5UqpbeqS5cuOPLII1GrVi18/fXXFd5To0YN3Hff\nfahatSouueQS1K5dG8uXL4/1RyciCiklpt07kay+vqOOOgrbtm1DcXHx74F81qxZAICmTZuiuLg4\n6HsCg36tWrWwZ8+exBSYiDKKZ2riydK5c2cccsghmDp1aoXXOIqEiJKNQTyCunXr4v7778ett96K\nyZMnY8+ePVBVLFq0CPv27Ut28YjIRSNHAvPnJ7sU0WEQd+Cuu+7C2LFj8dhjj6Fhw4Zo2LAhBg8e\njMceewydO3eO+H6JtUGeiBJm61bg4YeBp59Odkmiw8k+KYb3gig5xo4FPv0UmDMHWL8eqF072SUq\nxck+RBQS6wx2D15+GRgxAvjjH4HJk5NdIuccBXERyRGRPBFZISLDgrz+NxFZKCILRGSJiBSKyBHu\nF5eI3DR2LHD99ckuRfzdfjuwbFno1+fOBQoKgHPOAfr3B159NXFlq6yIzSkiUgXACgAXANgIIBdA\nX1XNC5H+cgB/VdWuQV5jc0oEvBeUKN98A1x2mQWvrVuB6tWTXaL4yM8HTjgBaN0amDcPOPTQimlu\nuglo0QIYNgw4cABo1AhYsADIykp8eYOpbHNKBwArVXWdqhYAmACge5j0/QC8HX0xiShRdu0C+vUD\nXnjBAtzcuckuUfx88IH9rC1aAMOHV3x9925rPhkwwI4POQTo0wd4443EljNWToJ4YwD5Acfr/ecq\nEJGaAHIAeKhFiSjzDBkCXHAB0KsXcPHFwIwZyS3Prl1AvObDTZsGdOsG/OtfwPvvA9Onl339nXeA\nc88FGjYsPTdgAPDaa97oL3C7Y/MKAF+r6k6X8yUil7zxBpCbCzz5pB1fdBHwySfu5F1UBHz7bfTv\n69PHAunu3e6Uo8Tu3cDs2fYzHnmk/ew33ggsXQps2GCPf/3LzgXq2NEC+Lx5wfOdPBkIsuJGUjiZ\ndr8BQNOA4yb+c8H0RYSmlFGjRv3+PDs7G9nZ2cjKyuJYar+sVGmEo7Q0Zw4wdKgNpfMvyImzzgLy\n8oBffgGOOir2vHfsAK69FvjoI2DJEuCUU5y97+OPgTVrbFTIVVcB//mPe+3zM2YAnTsDhx9ux+ec\nA9x5pwX1EscfD1xySdn3iVhtfNQo4MUXS9vGd++2TtJZs4BffwXGjQN69HCnrIF8Ph98Pp+zxKoa\n9gGgKoBVALIA1ACwCMDJQdLVBfALgJph8lIiSo68PNUGDVSnT6/42mWXqU6YEHveixernnii6l//\nqvr3v6v+6U/O3ldQoNqmjerUqfb8iitU+/dXLS6OvSyBBgxQffbZ2N67a5fq3Xer1quneu21qpMm\nqbZooXrjjap79qh++61qw4aq//63O2UNxx87g8fVUC9o2eCbA2A5gJUAhvvPDQIwMCDNAABvRcgn\n/j8tEVWwcaNqs2ahA84zz6j++c9lzy1apPrjj+HzPXDA3lu/vuobb9i5nTst8EV6r6rqCy+onnde\nadDeu1e1UyfVgQPtwyYvT/W33yLnE0xhoZVr7drY3l9i507VRx9Vbd++4gddXp5qVpbqAw+o7t9f\nueuEU+kg7taDQZzIPStXqt57b+R0v/6qetppFmhCyctTbdy4NJhu3my1zKws1Q0bKqYvLFR97TX7\nYMjJUV2ypOzrw4er3npr5HI1bKi6YEHZ81u3qg4ZonrRRarNm6vWrGm13mjNnKnarl3074vWTz+p\nXn65/SyjR6tu3+7+NRjEiYKYP1+1e3fVe+6xr/JeM2KE/QXPnBk6zYEDql27qg4aFL6JorjYAvbS\npapFRaoXX2wfEA8/rNq2rdVGS+TmWnA86yzVL78Mnt/mzapHHKG6aVPoaw4frnr99WF/RFW1gP7E\nE5HTlXf33c4+5NyyZIk139Srp/rFF+7mzSBOFGDZMtXevVWPPVb16actyHXtajVArygutlrqX/6i\n2rlz8ABdVGRtud27W805koEDVceOVR0zxvIsKLB8b7tN9fzzreY8YoTqMcdY00mkduvbblMdNqzi\n+U2brN27SRPV9esjl+uVV1T79YucrrxWrVS/+Sb691XWJ5+oNmoU/gMsWgzi5JrCQtUHH7Q/eLc6\nn+KtoEB12jQLeK1bqx51lNUw9+yx1wsLLdhkZVnt3AsWLbKmjMJCqym/917FNMOGWTDeu9dZnpMm\nqZ58surRR5dtzy4sVO3ZU7V2bdUrr3QenNassVrp3LmqCxfaY+xYu/93320dh058953qSSc5S1ti\n+XL7kC4qiu59brnvPmvrd/Lh6QSDO
LkiP1/13HNVs7Ptj/3tt5NdImcef9xGQDz0kNXMQv1hvfKK\nBcRkW7VKdcqU8GnuvVf1b3+z5x99pNqyZWmT0IEDVmNu2VJ12zbn192xQ7VaNdW33qr42r591nQS\n7Qf3/fdb00vJo0cP+yYUjYIC1Vq17JuAU3fdpXrnndFdx02FhRbE77svdJpoPmAYxKnSPvrIvkaP\nHm2/oPPm2XC1LVuSXbLwiovta/XXX0dOu3ev6qGHuld7itVNN1ln3oABwWurxcUWoOfNKz0+7zzV\nF1+0DsC2ba2jbePG6K/900+VKnrcdO6s6vM5S7t3r41KWbUqvmWKZNMma1Z57jkrS8kH4OLF1s5/\nxBGq11xjH7qRhAviXIqWItq3z1a6mzgRuOceoGpVoEMHW+3t9tuTXbrw5s0DiottQksktWoBxxwD\nrFsX/3KFcuAA8N57tvhStWpA+/YVd5pZutT+T848045FgEcfBe6+G7j0Uvt32jTg2GOjv/5xx1X+\nZ4iH0093PhN0wgSbcXniifEtUyQNG9r/5SefANnZQL16tgjXZZfZpKp582ypgR497P8zlBUrIlwo\nVHSPxwOsiXvSP/5hnWPl7dtnbZWTJye+TE4NHGjNKE5ddJHqhx/GrzyRvP++6h//WHo8caLVKmfM\nKD03cqTq0KEV3ztxorudaanEaedmcbENpww2oSnZtmyxb0+B3/QKCqyT96yzQg9N/NOf2JwSkyVL\nrD0v2V+tk+3gQevwmzs3+OuzZlkHktPOs0Tau1f1yCOdjYAocfvtNjojWXr3tmaRQDNnWiAvGUrY\nurXq7NmJL1syOe3cnD3bZo4mq0MzFkVF1unetWvF13bvVq1bl80pMfnsM9tv75prgIMHk12a5Hn7\nbVuqtGPH4K+fdZZ9RXRrASU3vfeelbtx0DU3g2vVCli+PH5lCmfXLruPvXqVPX/22cCbbwJXXmn/\n/vpr6P+PdHXyybZl2q5d4dM9+yxw221AFQ9FtipVgCeeAH74wdacCTRlCtClS4T3x69o3rZ2LfB/\n/2cB/Ior4rdMZioZMQLo2RPY6V+DsrgYeOQROx9Oz562xGeqeeUV4IYbontPy5a2GFQyTJliK/kF\nW4TqootsIaYBAyzIeylIuaFaNaBtW2DhwtBptmyxZWa9uFNR9erAoEHAc8+VPf/GG8B114V/b4b9\nKji3Zo3Vyt59F2jSBLjwQtsBJV298Yb9rI0aWYfZ998DU6faSnddK+zRVFb37sCHH6bW/Vm7Fli8\n2NaRjkYya+JvvWXf/ELp2dNWCPyf/0lcmVLJ6adX7OQtsW8fcO+9QO/etuSsFw0caIMHSipRmzZZ\n52ek32EG8RDWrgWaNbMawEsv2b9TpiS7VPGxcKEtT/r++1YTGDkSOO88CxYjRtjoh3CaNLGRAF99\nlZjyOvHqq0DfvsG34gqnUSNg797SP6RE2bLFdteJ9Ad74YVA06bh06SrYEG8oMC+obRoYUvhBqx0\n7TkNG9qSuOPH2/GECVZBqlUrwhtDNZbH4wGPdGwWF6vWqWOTH0q8/bZNPU43W7fazL+JE8ueX7DA\n1ttw2kH00EM2zTrZiottZbyjj7Z1QGJx+umhO3Lj5ZlnnC/fmqkWL7bx8SXy8+34wgttPZd0MGuW\nLadQVGSrJn72mZ0HR6dEZ9s26xEOdOCATW7Jy0tOmeLlssuCr28RrWXLbBW8ZI4K2L/f1npu3dqm\nXcfq2mtVx493r1yRbNhgIyo+/jhx1/Sikpmbu3ZZBatNG1siNp2UDJEcM8YmCpWMjgsXxNmcEsTa\ntbbbR6AaNYA//9k2lk0Xa9ZYm9sDD1Q+r1atgDp1Ytuayw3bt9vOMLt22c900kmx55XIzs1Vq2z0\nyc03216XFFq1asAf/mDNTj16WF/NXXclu1TuErHRNXffbf0jVatGfg+DeBBr1lh7eHkDBwKvvx5+\ndpWXvP22dQS5tRVWjx7JGaVy8KBt69Wli3UM1a5dufwS1bm5aJF98IwYAQwbFv/rpYPTT7ct4Bo0\nAMaOjdxf40X9+tmw2P79naVnEA8iWE0csMDeqZMFCq9TtTHH117rXp7JGGqoajWXOnVsrK0bf9St\nWsW3Jv7TT9Z5fNFFwDPPWC2cnOnaFTjjDOu4TtdhlrVqAT/+aN86nEjT21A5a9YED+IAcMstwPPP\nJ7Y88bBkiY3C6NzZvTzPOMPG0ydynPWTT9rO7W++6eyrpxPNm9vvQGFh6bl162wUxIwZ1gQSywSw\nRYtsbZPTTrPmny++sG8Q5FzPnjYWPNpRR14Tze8yg3gQoZpTABsC9PPPwDffJLRIrnvzTfva5mZt\npkoVoE8fG68bGADjZdo0YMwY+7eyTSiBata0xaPWrCk9N2iQfct47DGrQTdoYB/mxcXO8x061Ga4\n5ucD//gH0KaNe2WmDBaqx1PLjirJAZAHYAWAYSHSZANYCOB7AF+ESJOA/t3KO/nkinsGBnrtNdtP\nz+0tmBKlqEj1uONsPQq37d9vey727m3rrsTLnDm2nkjJcqxuy8lR/eADez5/vo28Cdyw94cfVDt2\ntCVgV6+OnN/q1Tbs0cmyo0TloTKjU0SkCoBnAVwMoA2AfiLSqlyaugCeA3C5qp4CoLcrnzBJoFo6\n0SeU666zDs6+fa1zxT6f3LN/v7v5lTdrFlC3rvM2t2gceqjVWPfutZp+PGZxrlhhnajjx9uSuPEQ\n2C7+8MPAnXcChxxS+vrJJ9t9vPRSK0PnzqWPe++tmN+rr9r9qFEjPuWlDBYqumtp7bkTgI8Cjoej\nXG0cwGAAf3eQV0I+tSpj0yar4TmxZo0NyI+0q3c0tmyxMepXX22TGeLhlltse7J4+u03G4N+3XXu\n5rtpk+rxx6u+9JK7+Zb3/PM25jwvz34fdu8OnTY/31bPmz3bVho89tiy27wVFdlKkAsXxrfMlL5Q\nyXHijQHkBxyv958LdBKAeiLyhYjkikiEJVtSV6iRKcE0a2adU6+/bivLueHBB4Grr7axyqeearXA\nAwfcyRuwDrl337VvEfF0yCHAO+8AH3xga0DEauZMG4bXpYs9zjzTFji68UbXihpUyTDDxx4DhgwJ\n3+bepElpLfzss4H77is7ZPCLL2w9j1NPjW+ZKTNVczGf9gDOB3AYgDkiMkdVV5VPOCpgcYPs7Gxk\nZ2e7VAR3hOvUDObwwy3IfPRR5QPj6tW2CNKyZcDRR9s40RtuAIqKgn9Fj8Unn1iAiuZnjFWtWrYC\n5KRJse0AtGqVjWN//HFbDhew5pr27d0tZzCtWtlokqVLrRzRuOkmGzXz6ae21kksqylSZvP5fPD5\nfM4Sh6qia9nmlI81fHPKMAAjA45fAtArSF6J+e5RCQ89ZDtxR2PcONW+fSt/7auvVn3ggbLnJk1S\nveKKyuddolevipsOxNOHH9r+iNHavt3WxUhkWQMVF6sefnjsm+2++65Nn96xw5rHtm51t3yUWV
DJ\n5pRcAM1FJEtEagDoC2BauTRTAZwtIlVFpBaAjgCWOfsYSS3R1sQBq21+/HHlNo/49ltbBXDo0LLn\nTz3VaoTR+vLLih2u27db7bBPn9jLGa0LL7SOyLVrQ6fZs8eG3g0dCsyebfexd28bzjlwYMKKWoYI\n8Pe/xz6tu1cvmwnbpw9wwQVA/frulo+oRMQgrqpFAIYAmAFgKYAJqrpMRAaJyEB/mjwAnwD4DsBc\nAONU9Yf4FTt+wk30CaVhQxut4PTbD2BNLx072izDNWusDXXkSFu/O9Dxx9uyqNu3O8/7559tY9bp\n08uenzgRyMkBjjjCeV6VVb26BbR33gmd5r//tSajI46woF2/vrWpP/FE4soZzB132P9tLESsPf3T\nT9mUQnEWqooejwc80JzSvLmtyBetRx5xPkpl2TLVY46xzVxvvtlGP7Rsaau0BXP22aqff+68LB9/\nbF/h27Uru6pgx47J2QT488+taSGUm25SffLJ0uPVq8uOyfayadNC/78SOYUwzSmibg9yDkNENJHX\ni1ZRkdWEd+6MflpvXp41Hfz0U+T1O265xWp4JX28hYU2NrxOneDpb7/dOvbKN7WE8vDDwLZt1jxz\n1132lX75cqud5+fbanCJVFQEHHecjdJo2bLsa8XFNrrjq69sujsRVSQiUNWgkYXT7gNs2gTUqxfb\nugytWtlojAULwqfbts2aNQYPLj1XrVroAA4A7dpF1y6+YIGt9vbQQzbcrbAQeO01W9oy0QEcsHUg\n+vQJvnDYwoU2wocBnCg2DOIBYunUDNS9e+Qt3F54wdqIGzRwnm+0nZsLF9owvK5dbbux8eNtLPuA\nAc7zcFvfvrb0bfkvYv/5D3D55ckpE1E6YBAPEEunZqAePWxz4VAOHLA9LJ02i5Ro08ZGeDiZ9LNz\np+3X2KKFNeuMHg389a+2g3rbttFd100dOwK//WajTwIxiBNVDoN4gEhrpkTSsaONDHn8cWs2Ke+t\nt6xpJNrV62rWtI2If3Aw3mfRIgvWJUtZnnWWjUgZNCi6a7pNxGaj3npr6XoqmzbZRJouXZJbNiIv\nS0ILaepas6ZyAaVqVRvW99RT1sabk2NjhEuWe33iCeDpp2PLu6RJ5bTTwqdbsKDijMZ33kmNBfSv\nucba5p980rafmj7dtiRza2chokzEIO732282euLWWyuXT/v2Fqh27ADeeMP2Ayxx1VU2giUWwdrF\nVSuOhFm4EDj//LLnUiGAA1bW55+3Vf969bKmlF69kl0qIm/jEEO/0aNt1mQy9oh04rPPbEPjL7+0\n44ICa74ZPdpmNpZo08Y2fEjlxZYee8xmuM6fb9tQHXVUsktElNrCDTHMiCD+5ptld2np2tX2yiyx\nfr21Vefmli60lGq2brXOyh07rEb73HM2zvy002zLMMDW8D76aOvcTOV1qwsKbDXCOnVslUIiCi+j\nx4kfPGhTuffvtyaTPXuAbt3KDgUcNszGbadqAAcsONeubXs97txp63pMnw4sXly6ecF33wGtW6d2\nAAesDfzdd60DmIgqJ+3bxBctspEdo0eXnuvd24a17dgBnHSSzRYcNy55ZXSqZNLPrFn2QXTmmbbs\n6T//abumB+vUTFUtWtiDiCon7YP4nDkVd3Q//XRbrOrii23s9ZgxFReeSkWnngq89x7w4Ye2zjVg\nU/jbtbMPqYUL7WcjosyR9s0pwYI4YGt4zJxpzSj9+iW+XLE49VSbeTl0aOnqescdB5x3no2E8VJN\nnIjckfYdm1lZ1vFXfuElL8rPt2GKX3xh67SUKBkauW4d8MsvNjmIiNJHxo5O2bjRdnTfti3yyoJe\npgqccoqNB1+yJNmlISK3hQviad0mXtKUks4BHLCfb8QIG6lCRJklrWvif/ub7TL+v/+bsEsSEbku\nY8eJh+rUJCJKF2lbEz940GrhW7bYJBkiIq+qdE1cRHJEJE9EVojIsCCvnysiO0Vkgf9xb2ULXVkL\nF9pkEgZwIkpnETs2RaQKgGcBXABgI4BcEZmqtsN9oK9UtVscyhgTNqUQUSZwUhPvAGClqq5T1QIA\nEwB0D5IupcaAMIgTUSZwEsQbA8gPOF7vP1deZxFZJCIfikhrV0pXCQziRJQJ3BonPh9AU1XdJyKX\nAJgC4KRgCUeNGvX78+zsbGRnZ7tUhFLr19uqhdxBnYi8yOfzwefzOUobcXSKiHQCMEpVc/zHwwGo\nqj4a5j1rAJyuqtvLnU/I6JRJk4BXXwU++CDulyIiirvKjk7JBdBcRLJEpAaAvgCmlbtAg4DnHWAf\nDtuRJPPmld30gYgoXUUM4qpaBGAIgBkAlgKYoKrLRGSQiAz0J7tKRL4XkYUAngJwddxK7MDcubZ1\nGRFRuku7yT4FBTbJZ8MGoG7duF6KiCghMmra/fff2/KzDOBElAnSLoizKYWIMknaBfF58xjEiShz\npF0QnzuXI1OIKHOkVcfmjh1A06b2b7W03u6CiDJJxnRs5ubabu8M4ESUKdIqiLNTk4gyTVoFcXZq\nElGmSZs2cVXg6KOB774DGjWKyyWIiJIiLdvEf/vNat1vvWUBfPVqoGZNBnAiyiye7QJcsADYtg14\n6CFg4kTgnHPYlEJEmcezNfE5c4BLLwXmzwfatgWGD+f4cCLKPJ5tE7/qKuDKK4FrrrHjlSuBhg2B\nOnVcyZ6IKGWEaxP3ZBBXBZo0Ab7+Gjj+eBcKRkSUwtKuYzM/HygqApo1S3ZJiIiSy5NBfPZs2wRZ\ngn4uERFlDk8Gce5kT0RkGMSJiDzMcx2b+/cD9evbGPGaNV0qGBFRCqt0x6aI5IhInoisEJFhYdKd\nKSIFInJlrIWNZP58oHVrBnAiIsBBEBeRKgCeBXAxgDYA+olIqxDpHgHwiduFDMSmFCKiUk5q4h0A\nrFTVdapaAGACgO5B0t0OYBKAn10sXwUM4kREpZwE8cYA8gOO1/vP/U5EGgHooarPA4jbwD9VBnEi\nokBuLYD1FIDAtvKQgXzUqFG/P8/OzkZ2drbji6xbZ/9mZUVXOCIiL/H5fPD5fI7SRhydIiKdAIxS\n1Rz/8XAAqqqPBqT5seQpgPoA9gIYqKrTyuVVqdEpEyYA77wDvPdezFkQEXlOuNEpTmriuQCai0gW\ngE0A+gLoF5hAVU8IuNgrAD4oH8DdkJsLnHmm27kSEXlXxDZxVS0CMATADABLAUxQ1WUiMkhEBgZ7\ni8tl/N38+bYRMhERGc9M9ikuBo48EvjxR+Coo1wuGBFRCkuLVQxXrbIgzgBORFTKM0GcTSlERBUx\niBMReRiDOBGRh3miY7OkU3P1alvBkIgok3i+Y3P1auCIIxjAiYjK80QQZ1MKEVFwDOJERB7GIE5E\n5GEp37Gpap2aK1cCRx8dp4IREaUwT3dsrl4NHH44AzgRUTApH8TZlEJEFBqDOBGRh6V0EJ8/H5gy\nhWuIExGFkpJBfPlyoHdvoFs3YOhQ4MILk10iIqLUl
HJBfPVqoFMn4IwzbETK4MFAlZQrJRFRanBr\no2TXzJwJ5OQAw4ZFTktElOlSro7LfTSJiJxjECci8jBHQVxEckQkT0RWiEiFhg4R6SYii0VkoYh8\nIyJdYinMwYPA998D7dvH8m4ioswTsU1cRKoAeBbABQA2AsgVkamqmheQ7DNVneZP/wcA7wA4OdrC\nfPcd0Lw5cNhh0b6TiCgzOamJdwCwUlXXqWoBgAkAugcmUNV9AYe1ARTHUphvvmFTChFRNJwE8cYA\n8gOO1/vPlSEiPURkGYAPAPw5lsKwPZyIKDquDTFU1SkApojI2QAeBBB0is6oUaN+f56dnY3s7Ozf\nj3NzgSFD3CoREZE3+Xw++Hw+R2kjLkUrIp0AjFLVHP/xcACqqo+Gec9qAGeq6vZy50MuRbtnD9Cg\nAbBjB1CjhqOyExFlhMouRZsLoLmIZIlIDQB9AUwrd4ETA563B1CjfACPZMEC4JRTGMCJiKIRsTlF\nVYtEZAiAGbCg/7KqLhORQfayjgPQS0T6AzgIYD+APtEWJDcX6NAh2ncREWW2lNnZp29f4NJLgf79\nE1YcIiJP8MTOPhyZQkQUvZQI4tu2AVu3Ai1bJrskRETekhJB/NtvbfceLjlLRBSdlAibc+eyKYWI\nKBZJD+Lhr48+AAAIQElEQVSFhcD48baTDxERRSfpQXzaNKBRI9bEiYhikfQg/vTTwB13JLsURETe\nlNQgvmiR7al55ZXJLAURkXclNYg/8wxw661A9erJLAURkXclbcbm1q3ASSfZjvb16yesCEREnpOS\nMzZffBHo1YsBnIioMpJWEz/hBGDyZOC00xJ2eSIiTwpXE09aED/0UGD7dqBWrYRdnojIk1KuOaWg\nwCb51KyZjKsTEaWPpATxPXuA2rUBCfq5QkRETiUliO/eDdSpk4wrExGlFwZxIiIPS1oQr107GVcm\nIkovrIkTEXmYoyAuIjkikiciK0RkWJDXrxGRxf7H1yLyh3D5MYgTEbkjYhAXkSoAngVwMYA2APqJ\nSKtyyX4E8EdVbQfgQQD/CpcngzgRkTuc1MQ7AFipqutUtQDABADdAxOo6lxV/dV/OBdA43AZMogT\nEbnDSRBvDCA/4Hg9wgfpmwB8FC5DBnEiIndUczMzETkPwA0Azg6VZtSoUfjvf235WZ8vG9nZ2W4W\ngYjI83w+H3w+n6O0EddOEZFOAEapao7/eDgAVdVHy6VrC2AygBxVXR0iL1VV/OUvwIknckcfIiIn\nKrt2Si6A5iKSJSI1APQFMK3cBZrCAvh1oQJ4IDanEBG5I2JziqoWicgQADNgQf9lVV0mIoPsZR0H\n4D4A9QD8U0QEQIGqdgiVJyf7EBG5w1GbuKp+DKBluXMvBjy/GcDNTi/KmjgRkTs4Y5OIyMMYxImI\nPIxBnIjIw5K2KQSDOBFR5bEmTkTkYQkP4gcPAsXFwCGHJPrKRETpJ+FBvKQWzv01iYgqLylBnBN9\niIjckbSaOBERVR6DOBGRhzGIExF5GIM4EZGHJTyIc6IPEZF7WBMnIvIwBnEiIg9jECci8jBO9iEi\n8jDWxImIPMxREBeRHBHJE5EVIjIsyOstRWS2iPwmIneGy4tBnIjIPRH32BSRKgCeBXABgI0AckVk\nqqrmBST7BcDtAHpEyo9BnIjIPU5q4h0ArFTVdapaAGACgO6BCVR1m6rOB1AYKTOOEycico+TIN4Y\nQH7A8Xr/uZiwJk5E5B52bBIReVjENnEAGwA0DThu4j8Xk59/HoV//hOoVQvIzs5GdnZ2rFkREaUl\nn88Hn8/nKK2oavgEIlUBLId1bG4C8A2Afqq6LEjakQD2qOqYEHlptWqKvXuBGjUclY+IKOOJCFQ1\n6H5oEYO4P4McAE/Dml9eVtVHRGQQAFXVcSLSAMC3AOoAKAawB0BrVd1TLh+tXl1x8GDlfiAiokxS\n6SDuYkG0Xj3FL78k7JJERJ4XLognvGOTnZpERO5hECci8jAGcSIiD2MQJyLyMAZxIiIPYxAnIvIw\nBnEiIg9LeBDnrj5ERO5hTZyIyMMYxImIPIxBnIjIwxjEiYg8jEGciMjDGMSJiDyMQZyIyMMYxImI\nPIyTfYiIPIw1cSIiD0t4EK9ePdFXJCJKX46CuIjkiEieiKwQkWEh0jwjIitFZJGInOpuMYmIKJiI\nQVxEqgB4FsDFANoA6CcircqluQTAiaraAsAgAC/Eoaxpx+fzJbsIKYP3oizej7J4P0JzUhPvAGCl\nqq5T1QIAEwB0L5emO4DXAEBV5wGoKyINXC1pGuIvZinei7J4P8ri/QjNSRBvDCA/4Hi9/1y4NBuC\npCEiIpclvGOTiIjcI6oaPoFIJwCjVDXHfzwcgKrqowFpXgDwhapO9B/nAThXVbeUyyv8xYiIKChV\nlWDnqzl4by6A5iKSBWATgL4A+pVLMw3AbQAm+oP+zvIBPFwhiIgoNhGDuKoWicgQADNgzS8vq+oy\nERlkL+s4VZ0uIpeKyCoAewHcEN9iExER4KA5hYiIUlfCOjadTBhKVyLSREQ+F5GlIrJERP7iP3+k\niMwQkeUi8omI1E12WRNJRKqIyAIRmeY/zsj7ISJ1ReRdEVnm/x3pmKn3AgBEZKiIfC8i34nImyJS\nI5PvRyQJCeJOJgyluUIAd6pqGwCdAdzm//mHA/hMVVsC+BzAiCSWMRnuAPBDwHGm3o+nAUxX1ZMB\ntAOQhwy9FyLSCMDtANqraltYk28/ZOj9cCJRNXEnE4bSlqpuVtVF/ud7ACwD0AR2D171J3sVQI/k\nlDDxRKQJgEsBvBRwOuPuh4gcDuAcVX0FAFS1UFV/RQbeiwBVARwmItUA1ITNO8nk+xFWooK4kwlD\nGUFEmgE4FcBcAA1KRvGo6mYAxySvZAn3JIC7AAR2ymTi/TgewDYRecXftDRORGohM+8FVHUjgDEA\nfoIF719V9TNk6P1wgpN9EkhEagOYBOAOf428fK9yRvQyi8hlALb4v52EG3aaCfejGoD2AJ5T1faw\n0V3Dkbm/G0fAat1ZABrBauTXIkPvhxOJCuIbADQNOG7iP5cx/F8NJwF4XVWn+k9vKVljRkQaAvg5\nWeVLsC4AuonIjwDeBnC+iLwOYHMG3o/1APJV9Vv/8WRYUM/U342uAH5U1e2qWgTgfQBnIXPvR0SJ\nCuK/TxgSkRqwCUPTEnTtVPFvAD+o6tMB56YBuN7/fACAqeXflI5U9R5VbaqqJ8B+Fz5X1esAfIAM\nux/+JoJ8ETnJf+oCAEuRob8bsGaUTiJyqIgI7H78gMy9HxElbJy4iOTAeuFLJgw9kpALpwAR6QLg\nKwBLYF8DFcA9AL4B8A6A4wCsA9BHVXcmq5zJICLnAvgfVe0mIvWQgfdDRNrBOnirA/gRNlmuKjLw\nXgCAiIyEfbgXAFgI4CYAdZCh9yMSTvYhIvIwdmwSEXkYgzgRkYcxiBMReRiDOBGRhzGIExF5GIM4\nEZGHMYgT
EXkYgzgRkYf9Px+XBpify8XiAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4VFX+x/H3N70SCISWTm9SQxVQQRQboGtB17ooNuz7\ns+2uu7pud3XdlXVFdO26ig0RRQULRZAAUkKAhISSShJISE8mc35/TMCQepNMMkPyfT2Pj7n3nrn3\nZBw/OXPuueeIMQallFIdi4erK6CUUsr5NNyVUqoD0nBXSqkOSMNdKaU6IA13pZTqgDTclVKqA9Jw\nV0qpDkjDXSmlOiANd6WU6oC8rBQSkdnAs4AnsNQY8+dax6OAV4Gu1WUeNsasbOycPXr0MDExMS2p\ns1JKdVpbtmzJNcaENVWuyXAXEU9gMTALSAM2i8hyY8zuGsV+DbxrjHleRIYBK4GYxs4bExNDfHx8\nU5dXSilVg4gctFLOSrfMBCDZGJNijKkA3gHm1ipjgC7VP4cAGVYrqpRSyvmsdMuEA4drbKcBE2uV\n+R3whYjcBQQC5zqldkoppVrESstd6tlXeyrJq4FXjDERwIXA6yJS59wislBE4kUkPicnp/m1VUop\nZYmVlnsaEFljO4K63S4LgNkAxpjvRcQP6AEcqVnIGLMEWAIQFxdXZ67hyspK0tLSKCsrs/wLnC78\n/PyIiIjA29vb1VVRSnUCVsJ9MzBQRGKBdGA+cE2tMoeAmcArIjIU8AOa3TRPS0sjODiYmJgYROr7\nwnB6MsaQl5dHWloasbGxrq6OUqoTaLJbxhhjAxYBq4BEHKNiEkTkCRGZU13sAeAWEdkOvA3caFqw\nCkhZWRndu3fvUMEOICJ07969Q34jUUq5J0vj3KvHrK+ste+xGj/vBs50RoU6WrCf0FF/L6WUe7IU\n7kop1R4qq+x8uC2dtKMlJ/f5envSJ8SP8K7+RHUPoE+IvwtrePrQcK9HdnY29913Hxs3bqRbt274\n+Pjw4IMPEhkZyWuvvcY///nPRl8/ZcoUNmzY0E61Ver0Z4zh052ZPLVqLwfyHMF+4stu7Q7eG6fE\n8NjFw/Dw0G/DjdFwr8UYw7x587jhhht46623ADh48CDLly/n0ksvJS4urslzaLArZY0xhm/25vCP\nr/axPa2Awb2C+e+N4zl7cNjJrsyyyioy8ktJzy/l811ZvLLhALlF5fz9ylH4enlauoYxNPnH4OMf\n03n+m/2MjuzK+JhQJsSGEhka4JTf0xU03GtZs2YNPj4+3HbbbSf3RUdHc9ddd/HNN9/w1FNPsWLF\nCn73u99x6NAhUlJSOHToEPfeey933303AEFBQRQVFbnqV1DK7dnthlUJWTz3dTIJGccJ7+rP3y4f\nyWVjI/CsFcJ+3p70CwuiX1gQUwf0IDI0gD9/tof8kkr+c904gnzrj7H9OUV8tC2dD7el4+vlwcp7\npjX4xyD+wFH+770d9ArxZeXOTN7Z7Hhuc/E1Y7loZB/n/vLtxG3D/fFPEtidcdyp5xzWtwu/vWR4\no2USEhIYO3aspfPt2bOHr7/+msLCQgYPHsztt9+u49iVakKFzc5tb2xhzZ4jxPYI5K+Xj2Te6HB8\nvJp+plJEuO2s/vQI8uWh93cw5okv6F3dH98jyJeSiioKyyrJK6ogJbcYD4GxUd2IP3iM1zYc5Jbp\n/eqcM+1YCbe+voXwbv58dMeZBPt5sTe7kNve2MKbmw5quHdUd955J+vWrcPHx4e//e1vpxy76KKL\n8PX1xdfXl549e5KdnU1ERISLaqqU+7PbDQ+8t501e47wm4uHceOUmDotdSsuHxdBdPcA1uw5Qvqx\nUjLyS0nIOE6gryfBvt4M7BXENROjmDOqLz27+HHjf3/gX2uSuCIugq4BPifPU1xu4+ZX46mosvPi\n9XGEBDgaZ0P7dGHuqL4893UyOYXlhAX7Ou09aC9uG+5NtbDbyvDhw3n//fdPbi9evJjc3Nx6+9p9\nfX/6D+7p6YnNZmuXOirl7owx5BZVUFJhIyo0ABHBGMPvPkngk+0ZPHzBEBZMbd0DfeNjQhkfE2qp\n7CMXDOWCZ7/jX2uS+c3FwwAoqbCx6K2t7Msu5JWbJjCgZ9Apr7l4VF/+uSaZz3Zlcv3kmFbV1RXc\nNtxdZcaMGTz66KM8//zz3H777QCUlJQ08SqlOqeC0krufnsbhWWVgGPSqYKSStLzSym32QEIDfQh\nLrobgb5efLgtnYXT+3HbWf3btZ6DewdzZVwkr31/gOsnR+Pr5cnNr21md8Zxnpx3BtMH1Z0efVCv\nYAb2DGLFjvYN9wqbnSq7wd+n6ZvFjdFwr0VE+Oijj7jvvvv461//SlhYGIGBgfzlL39xddWUcjtf\n7znCt/tymBATiq+3o8+8b4g/5w7rRXhXf3y8PNhy8Bg/pB7l0NES5o+P5JELhrikrvfPGsTHP2bw\n0Ps7SM0tpqjMxtIb4pgxpFeDr7l4ZF/+sXof2cfL6NXFDwBblZ3dmccZGdHVqfWzVdl5b0saz3y5\nj8IyGzdPi2Xh9H4E+7XsPp60YJYAp4iLizO1F+tITExk6NChLqlPe+jov5/qfH753na+Ssxmy69n\nNdl3XlRua3BkS3t55st9PLs6ifCu/iy9IY6hfbo0Wj75SBHnPv0tv71kGDed6ehGevTDnby16RBL\nr4/j3GEN/2Fojm/2HuHJTxNJPlLE2Kiu9Onqz6c7MgkN9OGemQO5fnL0yaGhIrLFGNPkmGxtuSul\nAKiyG+zG4O1pbWllYwzrknI5s38PSzdFXR3sALed1Z9gPy/mjO5Lz2C/JssP6BnEkN7BrNiRyU1n\nxvK/zYd4a9MhPD2EJWtTnBLuWQVlLHg1nujQAP5z7TjOH94LEeHW6fn8+bM9/HZ5AmHBvlx4RvNG\n7egC2UopAO5+extXvvA9dru1b/P7c4rJOl7GmQN6tHHNnMffx5Obp/WzFOwnXDKqL1sOHuOznZn8\n5qMEpg7owYPnD+aH1KPsSMtvdZ1W7Migym546cbxzB7R+2QLfWREV15fMJHwrv68ucnSynqncLtw\nd1U3UVvrqL+X6hgKSipZlZDFtkP5rNiZaek165Ics3pPPY3CvSUuqm4x3/HWVsKCffnn1WO4ZmIU\nQb5eLF2b2urzf/RjOqMiQojtEVjnmKeHcPWESNYn55GaW9ys87pVuPv5+ZGXl9fhgvDEfO5+ftZb\nC0q1p1UJWdjshh5Bvvz9i71UVI90acy65DyiQgOI6n76PqJvRUyPQM4ID8HH04MXrhtHaKAPwX7e\nzB8fyac7M0nPLz1Z9kBuMWuTrC9lkXykiF3px5kzOrzBMlfGReLlIbz9w6Fm1dv1nWA1REREkJaW\nRkdcgu/ESkxKuaNPdmQQFRrA43OGc9Mrm/nf5kNc18jwP1uVnY0peVwyqm/7VdKFnrtmDEXlNob3\nDTm576apsfx3wwFe3XCARy8cymc7M
/nle9spqaxi5d3TmrxZC7B8ewYeApc08hRszy5+zBrWi/fi\nD3P/rEGW6+xW4e7t7a0rFSnVzo4WV7Bhfx4Lp/fj7MFhTIgN5dnVyVw2NoLABm6Cbk/Lp6jcxrSB\nHbtL5oTo7nW7TMK7+nPhGX14e9MhbFWGl9enMjqyK6m5xfxxZSKvL5jY6DmNMSz/MZ0p/XvQs0vj\n3+p/PjGaz3ZlsSohy3Kd3apbRil1qhe+3c+S7/a36TU+35VFld1w8cg+iAgPzR5CblE5L69ruD95\nXVIeIjC5X/c2rZu7u3lqLIXlNl5en8rPJ0bxv1sncdeMAaxNyuXbfY33QGxPK+BAXglzRjf97WdK\n/+5Edw/gzY3Wu2YstdxFZDbwLOAJLDXG/LnW8WeAc6o3A4CexhjnjvBXqhEfbUsnJaeI8G7+hHcN\nIMjPyzFN7LFSjpdVcs3EqNNukYcqu2Hx18mUVFQxe3ifFvVt7844zkPv7+DMAT14uIGHhz7dmUFs\nj0CGVXcjjIvuxqxhvXjhuxTmjQmvd9rbdck5jOgbQrdAnzrHOpNRkV15YNYgoroHMLe63/y6ydG8\n+v0B/rQykakDGh4m+vGP6fh4eTB7RO8mr+PhIVwzIYo/fbbHct2aDHcR8QQWA7OANGCziCyvXloP\nAGPMfTXK3wWMsVwDpVqppMLGg+/vaPQm4FeJR/jg9imtfqS7Pe1Iy+d4mWO+on+s3sfTV44+eexo\ncQU/X7qJ1NyfppbuE+LPNROiuCIughB/b17ZcIA/rdyDzW4nMfM4V0+IrNO9kFNYzvf787jznAGn\nLAX5yAVDuPTfG7j6xY3879bJhHf96Q9jUbmNbYfy651hsTO6a+bAU7Z9vTx5aPYQFr21jfe3pnFl\nXGSd11TZDZ9sz2TG4J50sfgE6uXjIvj7F/ss18tKt8wEINkYk2KMqQDeAeY2Uv5qHItkK9Uu1ibl\nUmGz8+ovJrD2wXN4Z+Ekll4fx6d3T2X7Y+fxyk3j2ZN1nEc+2HFajcRam5SLCFwxLoKPtqWTfKQQ\ncPTVPrhsB/uPFPHzidHcMDmG6yfHEBbkyx9WJjLpT6u55Ll1PP7JbqYN7MHKe6bh5Sk8+1VSnWt8\nnpCF3Tges6+pX1gQry+YQEFJJde8uJGsgp8Wd/8hNQ+b3XT4IZCtcdEZfRgd2ZW/f7GXkoq6Ewqu\nT84lt6iceWOs35DuHuTLPecObLpgNSvdMuHA4RrbaUC9dwpEJBqIBdY0cHwhsBAgKirKciWVaszq\nxGyC/byY0r873p4edboRzh7ckwdmDeKpL/YxOrIrN55Z/037CpsdLw9xm+Xb1iY5uj4euXAoK3dm\n8vSX+/j3z8fxxsaDfJWYzW8uHlZnZsXdGcd5feMB1ifn8fic4ScfW79hcgxL1qZw+9n9Gdgr+GT5\nFdszGNAziEG9gmpfnpERXXl1wQSuW7qJa17cyMWj+rI59SjbDh8jwMeTcdHd2vw9OF2JCL+6aChX\n/Od7/rv+AHeeM+CU4y+uTaFHkA9nD+7ZrPPeec4AFlksa6XlXt8nvaHmz3xgmTGmqr6Dxpglxpg4\nY0xcWFjdWdiUai673bBmTw5nDQpr9LH5O84ewKxhvXjy00R+SD1a53hllZ2z/vY1L3yX0pbVtayw\nrJJth/KZNrAHoYE+LJgay8qdWXywNY0nP03krEFh3DQlps7rhvXtwp8uG8l3D57DDVNiTna13HZW\nfwJ9vHj6S8fXemMMb2w8yA8Hjp68kVqfsVHdeOUXE8g6XsZza5IoKrdxzYRo3r5lEn7ep08XlyuM\njwllxpCeLPkuhYLSypP7tx06xtqkXG6Z1q9N30Mr4Z4G1Ow0igAyGig7H+2SUa10+GgJy7ak8dCy\nHZz/zHe8vyWtwbLb0/LJLSrn3KGNz/Hh4SH8/cpRRHTz57GPd9U5vvnAUTILypo11KwtbUw5is1u\nmDbQ0QhaMK0fIf7e3P/udoL9vHnqilHN+obRLdCHX0yN5bNdWcQfOMov39vBrz/axfSBYU3Oqz4+\nJpRNj85k+2/P45O7pvLYJcMYFanjJay4f9YgCkoreWntT42Gf61JpluAN9dOim7Ta1sJ983AQBGJ\nFREfHAG+vHYhERkMdAO+d24VVWfyzg+HmPbXr/nle9v5PCGL0soqHvlgJz8ern8Oj68Ss/H0EM4e\n3PQ3wS5+3lw3OYY9WYUcqPUo95rEI8CJm5iV9b28zeSXVHD46KlrBqxLysHf25Ox0Y4QDfH35o6z\n+yMCT10xskUrA908LZYQf2+uWrKR97emcffMgbx843hLU8oG+3m3eOrZzmxEeAgXntGbl9alcrS4\ngl3pBazZc4QFU2MbfIbAWZoMd2OMDVgErAISgXeNMQki8oSIzKlR9GrgHXM63bFSbiWvqJw/rkxk\nQkwon987jW2/mcXHd55Jzy6+3P7GFnKLyuu8ZnXiEeKiu52ydFpjzh/uaOHXbqGv2XuE0EAf7AZ+\nSKnbbeNs5bYqPt+Vxa2vxzPhD6s59+lvScouPHl8bVIuk/qFnrKg88Lp/dj06Mxm99Oe0MXPmwfO\nG0RXf29euiGO+2cNatESd6p57p81iNLKKv7z7X7+tSaJYD8vrq+nS83ZLD3EZIxZaYwZZIzpb4z5\nQ/W+x4wxy2uU+Z0x5uG2qqjq+J76Yi8lFVX84dIRDOndBQ8PoVugD/+5dhxHiytY9NZWbFU/DXdM\nO1bCnqzCJrtkaoroFsCI8C6nhHtqbjEpOcXcdlY/fLw82LA/z6m/V23GGC5//ntue2MLWw7m8/NJ\njkmo7nv3RypsdtKOlZCSW8zUgad+GxGRZs1mWJ/rJ8cQ/+tzmdmM90y1zoCewcwbHc4rGw6wKiGb\nm6bEWB7+2Br6hKpyCzvS8nln82FunBJzymgOcHy1/dNlZ7Ax5SiPLU+gsjrgV1d3pcwc2ryW7Ozh\nvdl6KJ/s447hfWv2OM5zwYg+xEV3Y8P+3Nb+Oo3alX6cnekF/PK8QWx8ZAa/vWQ4f7zsDHalH+df\na5JYl+S4/vQ2erS/oZunqu3ce+4g7HZDoI8nv2jl2rFWabgrl7PbDY99nED3wIbH8V42NoJbp/fj\nrU2HmPvcehIyCvgqMZt+PQLpF1Z3GF9jTjwR+EV1633NnmwG9gwiMjSAMwf0YE9WYZ0uoKJy5y1+\nviohC08P4ZqJ0XhVj/A5f3hvLh8XweKvk3llwwF6d/Grs2CzOn1FdQ/gd3OG8+SlIyx3IbaWhrty\nuWVb0/jxcD6PXjik0Zt2j1w4lBeuG8eRwnLmPreeDfvzmt1qB8fX5H5hgaxKyKawrJJNKUeZUX2e\nyf0dc6VsTPmpa2Z9ci5jnviCFTsaGiTWPJ8nZDExNpTQWo/u//aSYfQJ8WdPViFTB/bQFnYHc+2k\naC4d034zw2q4d2Kf78pkwSub+dWHO/n3N8l8kZBleRUeZ9l+OJ/HlycQF92NS8c0PKf1CecP78
1X\n90/nklF9McZw0ciWTTk7e3hvvk/JY8WOTGx2w8zqRZJHhocQ5Ot1st+9ym74/YrdVFYZ/vL5Hkvz\nnDcm+UgRyUeKOH943flETgxx9PIQZjlpbU7VebnVlL+q/VRW2Xnik90UV1QhAvkljuF/v75oKDdP\na585Q5KPFHHjf38gNMiHxT8fa7ml2jXAh2euGs2T80a0eDjZ+cN78+9v9vO3VXsJ8fdmbJRjyKGX\npwcTY0PZkOzo9/5gaxp7sgq5Ki6S/8Uf5q1NB+t9wtVuN3yblMPH29K5ekIUExuYLfHEjdzzhtcf\n3pP7d2frY7MIdoP1RtXpTT9BndRnu7LIKCjjpRvimDm0F8XlNm5/cyvPrk7isrERdboMnC09v5Tr\nXtqEp4cHr/9iIr2amM+6Pq0ZJzwyIoQ+IX5kFpQxZ1Tfk33f4AjY1XuOsD+niL9/sY9RkV3588/O\n4ODRYv61JpnL4yJPLvZcUmHjrU2HeH3jQQ7mlSACX+7O5u2FkxgZUfdBn1UJWYyK7NroDJXtMZJC\ndXzaLeNEa5NyeHLF7nrHY7sTYwxL16bQLyyQc6rHTAf6evGbi4ZSUlHFM19an3muJQrLKrn+pU0U\nldt47RcTiKln7ci2JiInu0Zq99ufWPD5rre2kXW8jF9dOPTkPOd5xRUsrX7acFd6ARf/ax1PfppI\nWJBjbc21D55Dt0AfbvzvZpKPFJ1y3vT8UnakFTC7ni4ZpZxNw91JDuQWc/sbW1m6LpUZT33D6xsP\nUtXO/ddWOVZtL2DB1NhTHmEf2CuY6yZF8+amg+zNKmzkDK3zz9VJpOQWs+S6OIb1bXopsrZy7aQo\nZg7pyYwhp4b74F7BhAb6sDvzOLOG9WJCbCgAY6K6MXt4b178LoXn1iRx2b83UFxu482bJ7Ls9inM\nGdWXiG4BvLFgIh4C17+0iYwa62ueGJ1zfgNdMko5k4Z7C9R+CLessoo73tyKl6fw+oIJjAgP4Tcf\n7WLu4nV8uy/H7aaZfXFtKt0CvPnZ2Lp37u+ZOZBgP29+v2J3q+qdW1TO31btYX3yqWPGk7IL+e/6\nA1wVF3lyZIqrDOgZzEv1PH7v4SFM6d8dTw+ps8DFL88fTGllFU99sY/pg8L47J7pJ1v6J8T0COSV\nmyZQWGZjznPrefG7FIrLbXy+K4tBvYKaPXRTqZYQVwVPXFyciY+Pd8m1W2PFjgwefn8nV42PZNE5\nA+gW6MOvPtzJm5sO8fKNccwY0gtjDCt2ZPKnlYlkFJRxRngIi2YMYNbQXi6fTjYlp4iZT3/LXecM\n4P7zBtdb5r/rU3n8k90svT6Oc5s5aqOkwsbStam88O1+iiuq8PXy4PUFE5kQG4oxhmtf2sTOtAK+\n/uXZdA9q/vwo7SXtWAmH8kqYUs+c5e9udsyAfUVcRKM3gXelF/CnzxJZn5xHtwBvCkorWdTI+66U\nFSKyxRgT12Q5DXfrDh8t4cJn1xLo68WRwjICfb04f3hvlm1J49az+vHIBUNPKV9hs/PB1jSe/3Y/\nB/NKuHpCJH+6bGS71tlWZWdfdhH26v/OL69LZcXOTNY/NKPByacqq+xc+Oxa8ksrWXHXVMs3O7/e\ne4QHl+0gp7Cc2cN7c8v0WP5v2Q5yjpfzzq2TOJRXwu1vbuXxOcO5oR3m1nAXWw4eY/HXyaxLzuXT\nu6bWeQJXqebQcHcyW5WdK174nuQjRay8exolFVX85fM9rNlzhHHR3Xhn4aQG5xO3Vdl59MOdfLgt\nnY2PzGzzFqsxhp3pBXy4LZ1PtmeQW1RxyvGr4iL5y+WN/5HZm1XIvMXrGda3C2/fMgkfr4Z78Ox2\nwz/XJPHs6iSG9O7Ck/NGnFzIISO/lMuf30BFlcHbUwjx92bFXVNPGZ3SWVTZjU7UpVrNarh3qqGQ\nxeU2Xvv+IDdMiSbAp3m/+rOrk9h2KJ9/Xj3m5Eo/L984nt0Zx4kM9W90oQgvTw9umdaPd+PT+GBr\nutPWnrTbDRtT8/hwazpf7M6mtMKxRorBUFll8PH0YObQnpw/vPfJYYMeQoNjsGsa3DuYv14+krve\n3saTn+7mibkj6i1XUFLJfe/+yJo9R7hsbDh/mHfGKeuU9u3qz2sLJnLFfzaQW1TJP64a3SmDHdBg\nV+2qU4X7P77ax4trUwkN9Oaq8daX+duYksdzXydz+bgI5ow69YlIq6M9BvYKZlx0N97efIibp8XW\n21ebVVDGmj1H6Bns22Rf98qdmfx+xW4yC8oI8vXivGG96BXyU/dJdGgAF4zoQ0hAy8dMXzKqLzvS\n8nlxbSojI7py+bi6N2AXvb2VjSl5/H7ucK6dFF3v7zWgZxDv3TaZPVmFlv6wKKVar9OEe/IRxygN\ncMwCaDXcHZNa7SIqNIDH5wxvVR3mj4/k/5btYPOBYyeH1xljeGldKh//mMHO9AIAAn082fKbWQ0u\nwbVsSxoPLtvOiHDH+pqzhvY6pbXsTA/NHsLO9AJ+9eFOJsaGnrI+6ZaDjuXCHr1wCNdNjmn0PAN6\nBjOgp/Y1K9VeOsX3Y2MMv1u+mwAfTy48ozdrk3Ipt9W7zGsd3+w7wr7sIu6ZObDVK6dcNLIPwb5e\nvPPDoZP7/rk6mSc/TcTTQ3hw9mCenDeC4ooqvt2XU+853th4kF++t50zB/TgfwsnM2dU3zYLdnB0\nKT1z1WhE4M+f7Tnl2L/WJBEa6NPmy4UppZrPUriLyGwR2SsiySJS74IcInKliOwWkQQRecu51Wyd\nz3dlsS45lwfOG8zl4yIoqahik8XVdv7zbQp9Q/y4ZFTLJqiqKcDHi7lj+vLpzkwKSipZuTOTZ77a\nx2Vjw/nwjinccfYArhofSdcAb1buzKzz+lfWp/Lrj3Yxc0hPXrw+rk1DvaY+If7cdlZ/Pt2ZeXJx\n6R1p+XyzN4cFU2Obff9CKdX2mgx3EfEEFgMXAMOAq0VkWK0yA4FHgDONMcOBe9ugri1SWlHFk58m\nMqR3MD+fGMWU/j3w8/Y4uUBDY7YdOsYPqUf5xdTYRm+YNsf88VGU2+z8+fNE7n/3R8ZGdeWPl55x\nsq/a29OD84f1ZnXiEcoqf/p2kVlQyh9WJjJzSE+ev3Zcu688f+v0/vQJ8eOJFQnY7YZ/rUmmi58X\n10/WVrtS7shKYk0Ako0xKcaYCuAdYG6tMrcAi40xxwCMMU0nZzt54bv9pOeX8sTcEXh5euDn7cmZ\n/Xuwek92k09gLvkuhS5+XsyfYP3ma1NGhIdwRngIb/9wmNAAH164Lq5OUF84sg9F5TbWJv30dOeL\n36ViDDw+d3ijwxLbir+PJw9fMIRd6cf5w8pEvtydzU1nxuqiyUq5KSspEQ4crrGdVr2vpkHAIBFZ\nLyIbRWS2syrYGoVllby0LpXzh/80PwjAjKE9OXy0lP05RQ2+NjW3mM8TsrhucvTJGQCd5daz+tEj\nyJcXb4ir90GiKf27E+L/U9fM0eIK3v7hEHNGO+Yuc
ZU5o/oyJqorL61LJcjXi1/UM/WtUso9WAn3\n+gbn1m7yegEDgbOBq4GlIlJnvlMRWSgi8SISn5NT/w1DZ3pz0yEKy2zcec6AU/afmCjqxBqc9Xlx\nbQreHh5t8iTlxSP78sOjMxneN6Te496eHpw3rBdf7c6m3FbFK+tTKa2s4vaz+ju9Ls0hIjx28TBE\n4IYp0a0aZqmUaltWwj0NiKyxHQHUXm8sDfjYGFNpjEkF9uII+1MYY5YYY+KMMXFhYWG1DztVWWUV\nS9emMm1gjzrzavcJ8Wdony6sbqDffV92Icu2pPGzceGtXm2+IU3NMXPhyD4UVk829cqGA5w/vJdb\nPLY+Jqobq+6dzr3nDnJ1VZRSjbAS7puBgSISKyI+wHxgea0yHwHnAIhIDxzdNCnOrGhzvbcljdyi\ncm4/u/7W7swhPdly8BgF1SsQnXC0uIKbX42ni58398x0XYCd2b8HXfy8+PWHuzheZuOOswc0/aJ2\nMqhXsNNuMCul2kaT/4caY2zAImAVkAi8a4xJEJEnRGROdbFVQJ6I7Aa+Bv7PGJNX/xnbnq3KzpLv\n9jMmqiuTG3gicsbQnlRVL412QoXNzu1vbCHreBkvXj+O3iFt02q3wsfLg/OG96aw3MaZA7ozKrLu\nqj5KKdXUcarSAAARxklEQVQQS3cKjTErgZW19j1W42cD3F/9j8ut2JHJ4aOlPHbx8AanZB0V0ZUe\nQb786oOdrE/K5dKx4Xz8YwabUo/yj6tGMyaqWzvXuq7LxoTzwdY07ppRp4dLKaUa1eFmhbTbDbOf\n/Q6Az++Z3mjfdkJGAS+tS+XzXVmUVE+6dcfZ/Xlw9pAGX9PeCkoq9calUuqkTjsr5InpAp6+clST\nNy2H9w3h6StH8+Q8G1/uziaroIxbpjlnxkZn0WBXSrVEhwv3lkwXEODjxdzRtYfuK6XU6atDDXnY\nWj1dwIJp/XQ0h1KqU+tQCbjk2xRC/L2ZPz6y6cJKKdWBdZhwT8kpYtXuLK6bFN3qqXmVUup012HC\nfem6VLw922a6AKWUOt10iHDPKSxn2ZY0Lh8XUe9EXEop1dmc9uFeUmHjzje3YrcbtxvGqJRSrnJa\nh3tZZRU3vxpP/MGj/GP+aGJ7BLq6Skop5RZO2zuPZZVV3PJaPN+n5PHMlaO5eGTrl8FTSqmO4rRs\nuVdW2Vn01lbWJuXyl5+NZN4YfQBJKaVqOu3C3W43PLRsB18lHuH380ZwZZyOaVdKqdpOq3A3xvDk\np4l8sC2dB2YN4rpJujizUkrV57QK939/s5+X16dy05kxLJrhPotXKKWUuzltwv1gXjF/W7WXuaP7\n8puLhjU4T7tSSqnTKNwTMo4DcMu0fk1O5auUUp2dpXAXkdkisldEkkXk4XqO3ygiOSLyY/U/Nzu7\novuyCxGB/mFBzj61Ukp1OE2OcxcRT2AxMAtIAzaLyHJjzO5aRf9njFnUBnUEICm7iKjQAPx9PNvq\nEkop1WFYablPAJKNMSnGmArgHWBu21arrn3ZhQzsGdzel1VKqdOSlXAPBw7X2E6r3lfbz0Rkh4gs\nE5F6B5+LyEIRiReR+JycHMuVrLDZSc0tZnBv7ZJRSikrrIR7fXcva6+q/QkQY4wZCXwFvFrfiYwx\nS4wxccaYuLCwMMuVTM0txmY3DOqlLXellLLCSrinATVb4hFARs0Cxpg8Y0x59eaLwDjnVM9hX3Yh\ngHbLKKWURVbCfTMwUERiRcQHmA8sr1lARPrU2JwDJDqvio5w9/QQ+oXprI9KKWVFk6NljDE2EVkE\nrAI8gZeNMQki8gQQb4xZDtwtInMAG3AUuNGZldyXXUh09wD8vHWkjFJKWWFpyl9jzEpgZa19j9X4\n+RHgEedW7SdJ2UXa366UUs3g9k+ollVWcSCvmEG9dKSMUkpZ5fbhvj+nCLuBQb215a6UUla5fbgn\nZRcBaLeMUko1g9uH+77sQrw8hJjuOlJGKaWsOi3CvV9YID5ebl9VpZRyG26fmPuyixioXTJKKdUs\nbh3uJRU2Dh8rYZA+maqUUs3i1uGefKQIY9BhkEop1UxuHe77ToyU0WGQSinVLG4d7htT8vDz9iA6\nNMDVVVFKqdOK24Z72rESPtqWzlVxkXh5um01lVLKLbltav7n2/2IwK1n9Xd1VZRS6rTjluGeVVDG\nu5vTuHxcJH27+ru6Okopddpxy3B/4bv9VBnD7dpqV0qpFnG7cM8pLOetTYeYNzqcqO56I1UppVrC\n7cJ96boUKqvs3HmOttqVUqqlLIW7iMwWkb0ikiwiDzdS7nIRMSIS19IKvb8ljfOH96ZfmD64pJRS\nLdVkuIuIJ7AYuAAYBlwtIsPqKRcM3A1samllSiuqyC2qYER4SEtPoZRSCmst9wlAsjEmxRhTAbwD\nzK2n3O+BvwJlLa1MZkEpAH1C/Fp6CqWUUlgL93DgcI3ttOp9J4nIGCDSGLOiNZXJLHD8XegTosMf\nlVKqNayEu9Szz5w8KOIBPAM80OSJRBaKSLyIxOfk5NQ5/lO4a8tdKaVaw0q4pwGRNbYjgIwa28HA\nCOAbETkATAKW13dT1RizxBgTZ4yJCwsLq3OhzHxHt0xvDXellGoVK+G+GRgoIrEi4gPMB5afOGiM\nKTDG9DDGxBhjYoCNwBxjTHxzK5NRUEb3QB/8vD2b+1KllFI1NBnuxhgbsAhYBSQC7xpjEkTkCRGZ\n48zKZBaUaqtdKaWcwMtKIWPMSmBlrX2PNVD27JZWJqugjIhu+lSqUkq1lls9oZqRX0rfrtpyV0qp\n1nKbcC8ut3G8zKbdMkop5QRuE+4nhkH21THuSinVam4U7vp0qlJKOYv7hHt+dctdF+dQSqlWc59w\nr+6W6dnF18U1UUqp058bhXspPYJ88fXSB5iUUqq13CbcMwrKdBikUko5iduEe1ZBKb27aLgrpZQz\nuE24Z+aX6c1UpZRyErcI98KySgrLbToMUimlnMQtwv3ESBl9OlUppZzDrcJdu2WUUso53CPc8/Xp\nVKWUcia3CPeMgjJEoJeOllFKKadwi3DPKiglLMgXb0+3qI5SSp323CJNMwvK6KP97Uop5TSWwl1E\nZovIXhFJFpGH6zl+m4jsFJEfRWSdiAxrTiUy8kvpq/3tSinlNE2Gu4h4AouBC4BhwNX1hPdbxpgz\njDGjgb8CT1utgDGGzIIyHQaplFJOZKXlPgFINsakGGMqgHeAuTULGGOO19gMBIzVChwvs1FSUaWL\ndCillBNZWSA7HDhcYzsNmFi7kIjcCdwP+AAz6juRiCwEFgJERUUBNRbp0EnDlFLKaay03KWefXVa\n5saYxcaY/sBDwK/rO5ExZokxJs4YExcWFgZA1omnU3UYpFJKOY2VcE8DImtsRwAZjZR/B5hntQJF\n5TYAuvh7W32JUkqpJlgJ983AQBGJFREfYD6wvGYBERlYY/MiIMlqBUrKqwAI8NFFOpRSylma7HM3\nxthEZBGw
CvAEXjbGJIjIE0C8MWY5sEhEzgUqgWPADVYrUFLhaLkH+Fjp/ldKKWWFpUQ1xqwEVtba\n91iNn+9paQWKK7TlrpRSzubyJ1RLK6rwEPD1cnlVlFKqw3B5ohZX2Aj08UKkvkE5SimlWsLl4V5a\nUYW/dskopZRTuTzciyuqCPTVm6lKKeVMLg/30gob/t7acldKKWdyebgXl1cR6KvhrpRSzuTycC+p\nrMJfx7grpZRTuT7cy20E6g1VpZRyKteHu46WUUopp3ODcHeMc1dKKeU8bhDuVTr1gFJKOZlLw73K\nbii32XXSMKWUcjKXhvtPM0Jqy10ppZzJxeFePSOkjnNXSimnco9w15a7Uko5lUvDvbhcF+pQSqm2\nYCncRWS2iOwVkWQRebie4/eLyG4R2SEiq0Uk2sp5Syu15a6UUm2hyXAXEU9gMXABMAy4WkSG1Sq2\nDYgzxowElgF/tXJxbbkrpVTbsNJynwAkG2NSjDEVwDvA3JoFjDFfG2NKqjc3AhFWLl6qfe5KKdUm\nrIR7OHC4xnZa9b6GLAA+s3LxE+un6hOqSinlXFZStb7170y9BUWuBeKAsxo4vhBYCBAVFUVp9Th3\nnVtGKaWcy0rLPQ2IrLEdAWTULiQi5wK/AuYYY8rrO5ExZokxJs4YExcWFvZTy13HuSullFNZCffN\nwEARiRURH2A+sLxmAREZA7yAI9iPWL34iXHufl4a7kop5UxNhrsxxgYsAlYBicC7xpgEEXlCROZU\nF/sbEAS8JyI/isjyBk53ipJyGwE+nnh41Nfzo5RSqqUs3ck0xqwEVtba91iNn89tycVLKnVGSKWU\naguunX6g3KZj3JVSqg24fG4ZbbkrpZTzabgrpVQH5PL53LVbRimlnE9b7kop1QFpuCulVAfk+m4Z\nX+2WUUopZ3N9y91bW+5KKeVsrg93bbkrpZTTuSzc7cYxsaT2uSullPO5MNwd/w7UcFdKKadzXbhX\np7u/jnNXSimnc3m3jLbclVLK+VzeLaOrMCmllPO5vuWuo2WUUsrpXN/nruPclVLK6bTlrpRSHZCl\ncBeR2SKyV0SSReTheo5PF5GtImITkcutnPNEn7uOc1dKKedrMtxFxBNYDFwADAOuFpFhtYodAm4E\n3rJ6YX2ISSml2o6VPpEJQLIxJgVARN4B5gK7TxQwxhyoPma3emG73fGXRedzV0op57PSLRMOHK6x\nnVa9r9lEZKGIxItIfFFJMb5eHnh6SEtOpZRSqhFWwr2+9DUtuZgxZokxJs4YE+fn569dMkop1Uas\nhHsaEFljOwLIaO2F7Ua7ZJRSqq1YCffNwEARiRURH2A+sLy1F7Yboy13pZRqI02GuzHGBiwCVgGJ\nwLvGmAQReUJE5gCIyHgRSQOuAF4QkYSmzmu3G53LXSml2oildDXGrARW1tr3WI2fN+PorrHMboyu\nwqSUUm3EZU+oVhkI9NVwV0qptuDS6Qd0LnellGobLp04TOdyV0qptuHS+dx1LnellGobLu2WCdRu\nGaWUahMuC3fQlrtSSrUVl4a79rkrpVTbcGm46/QDSinVNlwb7jrOXSml2oSLW+4a7kop1Ra0W0Yp\npTogbbkrpVQHpC13pZTqgLTlrpRSHZCLx7lry10ppdqCPqGqlFIdkKVwF5HZIrJXRJJF5OF6jvuK\nyP+qj28SkZgmzwn4eLn0b4tSSnVYTaariHgCi4ELgGHA1SIyrFaxBcAxY8wA4BngL01eWKT5tVVK\nKWWJlabzBCDZGJNijKkA3gHm1iozF3i1+udlwEyRxtPbw0PDXSml2oqVcA8HDtfYTqveV2+Z6gW1\nC4DujV5Ys10ppdqMlXCvL4ZNC8ogIgtFJF5E4r1MpZX6KaWUagEr4Z4GRNbYjgAyGiojIl5ACHC0\n9omMMUuMMXHGmLh+vUNbVmOllFJNshLum4GBIhIrIj7AfGB5rTLLgRuqf74cWGOMqdNyV0op1T6a\nfIrIGGMTkUXAKsATeNkYkyAiTwDxxpjlwEvA6yKSjKPFPr8tK62UUqpxlh4RNcasBFbW2vdYjZ/L\ngCucWzWllFItpU8RKaVUB6ThrpRSHZCGu1JKdUAa7kop1QFpuCulVAckrhqOLiKFwF6XXNw99QBy\nXV0JN6Hvxan0/ThVZ38/oo0xYU0VcuVqGXuNMXEuvL5bEZF4fT8c9L04lb4fp9L3wxrtllFKqQ5I\nw10ppTogV4b7Ehde2x3p+/ETfS9Ope/HqfT9sMBlN1SVUkq1He2WUUqpDsgl4d7UgtsdmYhEisjX\nIpIoIgkick/1/lAR+VJEkqr/3c3VdW1PIuIpIttEZEX1dmz1YutJ1Yuv+7i6ju1BRLqKyDIR2VP9\nGZncmT8bInJf9f8nu0TkbRHx66yfjeZq93C3uOB2R2YDHjDGDAUmAXdW//4PA6uNMQOB1dXbnck9\nQGKN7b8Az1S/H8dwLMLeGTwLfG6MGQKMwvGedMrPhoiEA3cDccaYETimHJ9P5/1sNIsrWu5WFtzu\nsIwxmcaYrdU/F+L4nzecUxcZfxWY55oatj8RiQAuApZWbwswA8di69BJ3g8R6QJMx7E+AsaYCmNM\nPp34s4HjWRz/6hXeAoBMOuFnoyVcEe5WFtzuFEQkBhgDbAJ6GWMywfEHAOjpupq1u38ADwL26u3u\nQH71YuvQeT4j/YAc4L/VXVRLRSSQTvrZMMakA08Bh3CEegGwhc752Wg2V4S7pcW0OzoRCQLeB+41\nxhx3dX1cRUQuBo4YY7bU3F1P0c7wGfECxgLPG2PGAMV0ki6Y+lTfW5gLxAJ9gUAc3bm1dYbPRrO5\nItytLLjdoYmIN45gf9MY80H17mwR6VN9vA9wxFX1a2dnAnNE5ACOLroZOFryXau/ikPn+YykAWnG\nmE3V28twhH1n/WycC6QaY3KMMZXAB8AUOudno9lcEe5WFtzusKr7k18CEo0xT9c4VHOR8RuAj9u7\nbq5gjHnEGBNhjInB8VlYY4z5OfA1jsXWoZO8H8aYLOCwiAyu3jUT2E0n/Wzg6I6ZJCIB1f/fnHg/\nOt1noyVc8hCTiFyIo3V2YsHtP7R7JVxERKYCa4Gd/NTH/CiOfvd3gSgcH+orjDFHXVJJFxGRs4Ff\nGmMuFpF+OFryocA24FpjTLkr69ceRGQ0jhvLPkAKcBOORlin/GyIyOPAVThGmW0DbsbRx97pPhvN\npU+oKqVUB6RPqCqlVAek4a6UUh2QhrtSSnVAGu5KKdUBabgrpVQHpOGulFIdkIa7Ukp1QBruSinV\nAf0/IYYG4Jtr1ywAAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10fa6c400>"
+ "<matplotlib.figure.Figure at 0x11934cdd8>"
]
},
"metadata": {},
@@ -797,14 +783,25 @@
{
"cell_type": "code",
"execution_count": 17,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
+ "<style>\n",
+ " .dataframe thead tr:only-child th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: left;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
@@ -874,14 +871,12 @@
{
"cell_type": "code",
"execution_count": 18,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10fa5a978>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x11af58c18>"
]
},
"execution_count": 18,
@@ -890,9 +885,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXEAAAEACAYAAABF+UbAAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEgRJREFUeJzt3X+M5HV9x/HnG49eRMJy/uDOSLy1bdDWxByYoA02bqMC\n/SWmTaw/knpqbP/QYCRpRP650PqH5x/UprZ/qJQFIxUlR9HGIBhuqmhEFA5RT6TYU6jc+QOEXggN\nwrt/zPd0pXsz39nduc+8b56PZDLz+e53dl7Zu33vd1/zndnITCRJNZ3QOoAkae0c4pJUmENckgpz\niEtSYQ5xSSrMIS5JhY0d4hGxOSJujYg7IuKuiNjVbd8SETdGxN0R8fmIWJh+XEnSStHnPPGIOCkz\nH42IpwFfBi4E/hz4WWZ+MCLeC2zJzIunG1eStFKvOiUzH+1ubgY2AQlcAFzZbb8SeN2Gp5MkjdRr\niEfECRFxB3AQuCkzbwO2ZuYhgMw8CJw2vZiSpNX0PRJ/MjPPBE4Hzo6IFzM8Gv+13TY6nCRptE2T\n7JyZj0TEADgfOBQRWzPzUERsA3682n0iwuEuSWuQmTFunz5npzz7yJknEfF04DXAfuAzwM5ut7cA\n148IUvaya9eu5hnmNX/l7OZvf6mev68+R+LPBa6MiBMYDv1rMvNzEfFV4FMR8TbgB8Drez9qIQcO\nHGgdYV0q56+cHczfWvX8fY0d4pl5F3DWKtsfBF49jVCSpH58xeYYO3fubB1hXSrnr5wdzN9a9fx9\n9Xqxz7oeICKn/RiSdLyJCHIjnticd4PBoHWEdamcv3J2MH9r1fP35RCXpMKsUyRpBlmnSNIccIiP\nUb1Xq5y/cnYwf2vV8/flEJekwuzEJWkG2YlL0hxwiI9RvVernL9ydjB/a9Xz9+UQl6TC7MQlaQbZ\niUvSHHCIj1G9V6ucv3J2MH9r1fP35RCXpMLsxCVpBtmJS9IccIiPUb1Xq5y/cnYwf2vV8/flEJek\nwuzEJWkG2YlL0hxwiI9RvVernL9ydjB/a9Xz9+UQl6TCjkknfs8990z1MablxBNPZPv27a1jSJpD\nfTvxYzLETz75t6b6GNPy2GM/4ktfupmXv/zlraNImjN9h/imYxHm8OH/PBYPs+EWFs7ji1/8Yukh\nPhgMWFpaah1jTSpnB/O3Vj1/X3biklTY2DolIk4HrgK2Ak8CH8nMf4yIXcA7gB93u16SmTescv+E\nmueJLyycxzXXXMR5553XOoqkObORdcovgIsyc19EnAx8IyJu6j52WWZetp6gkqS1G1unZObBzNzX\n3T4M7Aee13147E+J6u68887WEdal8rmylbOD+Vurnr+viTrxiFgEdgC3dpveFRH7IuJjEbGwwdkk\nSWP0PsWwq1IGwN9l5vUR8Rzgp5mZEfF+4LmZ+fZV7mcnLkkT2tBTDCNiE3At8PHMvB4gM3+yYpeP\nAp89+mfYCSx2t09leDC/1K0H3fVsru+88042b978y1OVjvyK5tq1a9cbuR4MBiwvLwOwuLhIb5k5\n9sLw7JTLnrJt24rb7wGuPsp9E7LkZWHh3Ny9e3dWtnfv3tYR1qxy9kzzt1Y9/3A8j5/PY4/EI+Ic\n4M3AXRFxx3AocwnwpojYwfC0wwPAX/f/0SFJ2gjH5GX3duKSNBnfT1yS5oBDfAzPE2+ncnYwf2vV\n8/flEJekwuzER7ATl9SKnbgkzQGH+Bh24u1Uzg7mb616/r4c4pJUmJ34CHbiklqxE5ekOeAQH8NO\nvJ3K2cH8rVXP35dDXJIKsxMfwU5cUit24pI0BxziY9iJt1M5O5i/ter5+3KIS1JhduIj2IlLasVO\nXJLmgEN8DDvxdipnB/O3Vj1/Xw5xSSrMTnwEO3FJrdiJS9IccIiPYSfeTuXsYP7WqufvyyEuSYXZ\niY9gJy6pFTtxSZoDDvEx7MTbqZwdzN9a9fx9OcQlqTA78RHsxCW1YicuSXNg7BCPiNMj4uaI+HZE\n3BURF3bbt0TEjRFxd0R8PiIWph/32LMTb6dydjB/a9Xz99XnSPwXwEWZ+WLg94B3RsSLgIuBL2Tm\nC4GbgfdNL6YkaTUTd+IR8W/Ah7vLKzPzUERsAwaZ+aJV9rcTl6QJTaUTj4hFYAfwVWBrZh4CyMyD\nwGmTx5QkrcemvjtGxMnAtcC7M/Pw8Aj714w43N4JLHa3T2X4c2CpWw+669lc79mzh82bN7O0NFwf\n6dmqrD/0oQ+xY8eOmckzyXplpzkLecw/W/mOt/yDwYDl5WUAFhcX6S0zx14YDvsbGA7wI9v2Mzwa\nB9gG7D/KfROy5GVh4dzcvXt3VrZ3797WEdascvZM87dWPf9wPI+fz7068Yi4CvhpZl60Yttu4MHM\n3B0R7wW2ZObFq9zXTlySJtS3Ex9bp0TEOcCbgbsi4g6GE/kSYDfwqYh4G/AD4PXriyxJmtTYJzYz\n88uZ+bTM3JGZZ2bmWZl5Q2Y+mJmvzswXZua5mfnzYxH4WPM88XYqZwfzt1Y9f1++YlOSCvO9U0aw\nE5fUiu+dIklzwCE+hp14O5Wzg/lbq56/L4e4JBVmJz6CnbikVuzEJWkOOMTHsBNvp3J2MH9r1fP3\n5RCXpMLsxEewE5fUip24JM0Bh/gYduLtVM4O5m+tev6+HOKSVJid+Ah24pJasROXpDngEB/DTryd\nytnB/K1Vz9+XQ1ySCrMTH8FOXFIrduKSNAcc4mPYibdTOTuYv7Xq+ftyiEtSYXbiI9iJS2rFTlyS\n5oBDfAw78XYqZwfzt1Y9f18OcUkqzE58BDtxSa3YiUvSHHCIj2En3k7l7GD+1qrn72vsEI+IyyPi\nUER8c8W2XRFxf0Tc3l3On25MSdJqxnbiEfEK4DBwVWa+pNu2C/ifzLxs7APYiUvSxDasE8/MW4CH\nVnuMtQSTJG2c9XTi74qIfRHxsYhY2LBEM8ZOvJ3K2cH8rVXP39emNd7vn4G/zcyMiPcDlwFvP/ru\nO4HF7vapwA5gqVsPuuvZXN97770MBgOWlobrI/8xqqz37ds3U3lcu3a9+nowGLC8vAzA4uIiffU6\nTzwitgOfPdKJ9/1Y93E7cUma0EafJx6s6MAjYtuKj/0Z8K3J4kmSNkKfUwyvBr4CnBERP4yItwIf\njIhvRsQ+4JXAe6acsxk78XYqZwfzt1Y9f19jO/HMfNMqm6+YQhZJ0oR875QR7MQlteJ7p0jSHHCI\nj2En3k7l7GD+1qrn78shLkmF2YmPYCcuqRU7cUmaAw7xMezE26mcHczfWvX8fTnEJakwO/ER7MQl\ntWInLklzwCE+hp14O5Wzg/lbq56/L4e4JBVmJz6CnbikVuzEJWkOOMTHsBNvp3J2MH9r1fP35RCX\npMLsxEewE5fUip24JM0Bh/gYduLtVM4O5m+tev6+HOKSVJid+Ah24pJasROXpDngEB/DTrydytnB\n/K1Vz9+XQ1ySCrMTH8FOXFIrduKSN
Acc4mPYibdTOTuYv7Xq+ftyiEtSYWM78Yi4HPgT4FBmvqTb\ntgW4BtgOHABen5kPH+X+duKSNKGN7MSvAJ46xS4GvpCZLwRuBt43eURJ0nqNHeKZeQvw0FM2XwBc\n2d2+EnjdBueaGXbi7VTODuZvrXr+vtbaiZ+WmYcAMvMgcNrGRZIk9dXrPPGI2A58dkUn/mBmPnPF\nx3+Wmc86yn0T3gIsdltOBXYAS9160F3P3nph4TyeeOLrHD78IFVt2bKVPXs+ydLSEvCroxPXrl3P\n1nowGLC8vAzA4uIil156aa9OfK1DfD+wlJmHImIbsDczf+co9y39xObDD99I1fxDwbRf0CVp4230\ni32iuxzxGWBnd/stwPUTpZN6qN5pmr+t6vn7GjvEI+Jq4CvAGRHxw4h4K/AB4DURcTfwqm4tSTrG\nfO+UEaxTJLXie6dI0hxwiGtmVe80zd9W9fx9OcQlqTA78RHsxCW1YicuSXPAIa6ZVb3TNH9b1fP3\n5RCXpMLsxEewE5fUip24JM0Bh7hmVvVO0/xtVc/fl0NckgqzEx/BTlxSK3bikjQHHOKaWdU7TfO3\nVT1/Xw5xSSrMTnwEO3FJrdiJS9IccIhrZlXvNM3fVvX8fTnEJakwO/ER7MQltWInLklzwCGumVW9\n0zR/W9Xz9+UQl6TC7MRHsBOX1IqduCTNAYf4ce9EIqLk5ZnP3Nb6i7cu1TtZ89ewqXUATdvjVK2D\nHnpo7G+S0tyzEx/heOnE6+a3z9f86tuJr+tIPCIOAA8DTwKPZ+bZ6/l8kqTJrLcTfxJYyswzHeDS\nr6veyZq/hvUO8diAzyFJWqN1deIR8X3g58ATwEcy86Or7GMn3pSduFTRMenEgXMy84GIeA5wU0Ts\nz8xb1vk5JUk9rWuIZ+YD3fVPIuI64GxglSG+E1jsbp8K7ACWuvWgu57V9ZFts5Jn0vWRbbOSZ5L1\n8Bz3qrZs2cqePZ8EYGlpCfhVT1thvbJTnoU8x3v+wWDA8vIyAIuLi/S15jolIk4CTsjMwxHxDOBG\n4NLMvPEp+1mnNFW7TqmbHarXQYPB4JfDpqLq+fvWKesZ4i8ArmP4XbYJ+ERmfmCV/RziTVUehJWz\nQ/Uhrram3oln5n8x7EUkSY14eqCkVVU/z7p6/r4c4pJUmO+dMoKdeGuVs4OduNbD9xOXpDngEJe0\nquqdcvX8fTnEJakwO/ER7MRbq5wd7MS1HnbikjQHHOKSVlW9U66evy+HuCQVZic+gp14a5Wzg524\n1sNOXJLmgENc0qqqd8rV8/flEJekwuzER7ATb61ydrAT13rYiUvSHHCIS1pV9U65ev6+HOKSVNi6\n/tq9pFE2EzG20pxZW7du5+DBA61jrFnlP5I8CYe4NDX/S+UnZg8dqvsDaJ5Yp0g6LtmJS5JmnkNc\n0nFpXjpxh7gkFeYQl3RcmpdO3LNTJB3FiaVPkdyyZSsPPniwdYypc4hLOorHqXyK5EMP1f0BNAnr\nFEkqbF1DPCLOj4jvRsT3IuK9GxVKktTPmod4RJwAfBg4D3gx8MaIeNFGBZMkjbeeI/GzgXsy8weZ\n+TjwSeCCjYklSepjPUP8ecB9K9b3d9skScfIMTk75ZRT/vRYPMyGe+yx21tHkKSR1jPE/xt4/or1\n6d22/+eRR/59HQ8zC6qfqlQ5f+XsYP62Kp/n3tea/8ZmRDwNuBt4FfAA8DXgjZm5f+PiSZJGWfOR\neGY+ERHvAm5k2K1f7gCXpGNr6n/tXpI0PVN7xWb1FwJFxOURcSgivtk6y6Qi4vSIuDkivh0Rd0XE\nha0zTSIiNkfErRFxR5d/V+tMaxERJ0TE7RHxmdZZJhURByLizu7f4Gut80wiIhYi4tMRsb/7HnhZ\n60x9RcQZ3df89u764XHfv1M5Eu9eCPQ9hn35j4DbgDdk5nc3/MGmJCJeARwGrsrMl7TOM4mI2AZs\ny8x9EXEy8A3ggmJf/5My89HuuZcvAxdmZrVh8h7gpcApmfna1nkmERHfB16amQ+1zjKpiFgG/iMz\nr4iITcBJmflI41gT6+bo/cDLMvO+o+03rSPx8i8EysxbgHL/gQEy82Bm7utuHwb2U+wc/sx8tLu5\nmeFzN6V6v4g4Hfgj4GOts6xRUPC9lSLiFOD3M/MKgMz8RcUB3nk1cO+oAQ7T+0fyhUAzIiIWgR3A\nrW2TTKarIu4ADgI3ZeZtrTNN6O+Bv6HYD58VErgpIm6LiHe0DjOBFwA/jYgrukriIxHx9Nah1ugv\ngH8dt1O5n7Tqr6tSrgXe3R2Rl5GZT2bmmQxff/CyiPjd1pn6iog/Bg51vw0FNU+2Piczz2L428Q7\nu3qxgk3AWcA/dfkfBS5uG2lyEXEi8Frg0+P2ndYQ7/1CIE1H1wVeC3w8M69vnWetul+F9wLnt84y\ngXOA13a98r8CfxARVzXONJHMfKC7/glwHcOKtIL7gfsy8+vd+lqGQ72aPwS+0X39R5rWEL8N+O2I\n2B4RvwG8ASj3DD11j6IA/gX4Tmb+Q+sgk4qIZ0fEQnf76cBrgDJPymbmJZn5/Mz8TYb/92/OzL9s\nnauviDip+y2OiHgGcC7wrbap+snMQ8B9EXFGt+lVwHcaRlqrN9KjSoEpvXfK8fBCoIi4GlgCnhUR\nPwR2HXmyZNZFxDnAm4G7ul45gUsy84a2yXp7LnBl9+z8CcA1mfm5xpnmyVbguohIhjPiE5l5Y+NM\nk7gQ+ERXSXwfeGvjPBOJiJMYPqn5V73298U+klSXT2xKUmEOcUkqzCEuSYU5xCWpMIe4JBXmEJek\nwhziklSYQ1ySCvs/3hkyICCoKsoAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD8CAYAAABn919SAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAADXxJREFUeJzt3W+IXHe9x/HPx6RydUfTSHSIaXF7oaglxdYMUi3IrFWJ\n9nKrDwSLt9Q/sD6wWqUgsU8URIhgqhJEXG1NwdjlUispaamW2rEIl3J3a3BTV6nUtSaNWUPq2i0B\njX59sCftTrvrzJ6ZyZn97vsFYWfOnJn58iN5c3Iyc+KIEABg/XtZ1QMAAPqDoANAEgQdAJIg6ACQ\nBEEHgCQIOgAkQdABIAmCDgBJEHQASGLz+Xyzbdu2xejoaKnnPvfccxoZGenvQOsY6/EC1qId69Eu\nw3pMT0+fiojXdtrvvAZ9dHRUU1NTpZ7barXUbDb7O9A6xnq8gLVox3q0y7Aetv/QzX6ccgGAJAg6\nACRB0AEgCYIOAEkQdABIgqADQBIEHQCSIOgAkARBB4Akzus3RXsxc3xBH91zX9VjaG7vtVWPAAAr\n4ggdAJIg6ACQBEEHgCQIOgAkQdABIAmCDgBJEHQASIKgA0ASBB0AkiDoAJBEx6Dbvtj2w7ZnbT9u\n++Zi+5dsH7d9pPj1/sGPCwBYTTfXcjkr6ZaIeMz2qyRN236weOzrEfG1wY0HAOhWx6BHxAlJJ4rb\nz9qelbRj0IMBANZmTefQbY9KulLSo8Wmm2z/yvYdtrf2eTYAwBo4Irrb0a5J+rmkr0TEPbbrkk5J\nCklflrQ9Ij6+wvPGJY1LUr1e3zU5OVlq0PnTCzp5ptRT++ryHVuqHkGStLi4qFqtVvUYQ4G1aMd6\ntMuwHmNjY9MR0ei0X1dBt32BpMOSfhIRt63w+KikwxGx89+9TqPRiKmpqY7vt5L9Bw9p30z1l28f\nluuht1otNZvNqscYCqxFO9ajXYb1sN1V0Lv5lIsl3S5pdnnMbW9fttsHJR0tMygAoD+6OeS9WtIN\nkmZsHym23SrpettXaOmUy5ykTw5kQgBAV7r5lMsvJHmFh+7v/zgAgLL4pigAJEHQASAJgg4ASRB0\nAEiCoANAEgQdAJIg6ACQBEEHgCQIOgAkQdABIAmCDgBJEHQASIKgA0ASBB0AkiDoAJAEQQeAJAg6\nACRB0AEgCYIOAEkQdABIgqADQBIEHQCSIOgAkARBB4AkCDoAJEHQASAJgg4ASRB0AEiCoANAEgQd\nAJLoGHTbF9t+2Pas7cdt31xsf43tB20/UfzcOvhxAQCr6eYI/aykWyLizZKukvQp25dJ2iPpoYi4\nVNJDxX0AQEU6Bj0iTkTEY8XtZyXNStoh6TpJdxa73SnpA4MaEgDQmSOi+53tUUmPSNop6amIuHDZ\nY89ExEtOu9gelzQuSfV6fdfk5GSpQedPL+jkmVJP7avLd2ypegRJ0uLiomq1WtVjDAXWoh3r0S7D\neoyNjU1HRKPTfpu7fUHbNUk/kvTZiPir7a6eFxETkiYkqdFoRLPZ7PYt2+w/eEj7Zroed2DmPtKs\negRJUqvVUtm1zIa1aMd6tNtI69HVp1xsX6ClmB+MiHuKzSdtby8e3y5pfjAjAgC60c2nXCzpdkmz\nEXHbsofulXRjcftGSYf6Px4AoFvdnMO4WtINkmZsHym23Sppr6T/tf0JSU9J+tBgRgQAdKNj0CPi\nF5JWO2F+TX/HAQCUxTdFASAJgg4ASRB0AEiCoANAEgQdAJIg6ACQBEEHgCQIOgAkQdABIAmCDgBJ\nEHQASIKgA0ASBB0AkiDoAJAEQQeAJAg6ACRB0AEgCYIOAEkQdABIgqADQBIEHQCSIOgAkARBB4Ak\nCDoAJEHQASAJgg4ASRB0AEiCoANAEgQdAJLoGHTbd9iet3102bYv2T5u+0jx6/2DHRMA0Ek3R+gH\nJO1eYfvXI+KK4tf9/R0LALBWHYMeEY9IOn0eZgEA9KCXc+g32f5VcUpma98mAgCU4ojovJM9Kulw\nROws7tclnZIUkr4saXtEfHyV545LGpeker2+a3JystSg86cXdPJMqaf21eU7tlQ9giRpcXFRtVqt\n6jGGAmvRjvVol2E9xsbGpiOi0Wm/zWVePCJOnrtt+7uSDv+bfSckTUhSo9GIZrNZ5i21/+Ah7Zsp\nNW5fzX2kWfUIkqRWq6Wya5kNa9GO9Wi3kdaj1CkX29uX3f2gpKOr7QsAOD86HvLavktSU9I228ck\nfVFS0/YVWjrlMifpkwOcEQDQhY5Bj4jrV9h8+wBmAQD0gG+KAkASBB0AkiDoAJAEQQeAJAg6ACRB\n0AEgCYIOAEkQdABIgqADQBIEHQCSIOgAkARBB4AkCDoAJEHQASAJgg4ASRB0AEiCoANAEgQdAJLo\n+F/Qod3onvuqHkGSdGD3SNUjABgyHKEDQBIEHQCSIOgAkARBB4AkCDoAJEHQASAJgg4ASRB0AEiC\noANAEgQdAJLoGHTbd9iet3102bbX2H7Q9hPFz62DHRMA0Ek3R+gHJO1+0bY9kh6KiEslPVTcBwBU\nqGPQI+IRSadftPk6SXcWt++U9IE+zwUAWKOy59DrEXFCkoqfr+vfSACAMhwRnXeyRyUdjoidxf2/\nRMSFyx5/JiJWPI9ue1zSuCTV6/Vdk5OTpQadP72gk2dKPTWlS7ZsUq1Wq3qMobC4uMhaLMN6tMuw\nHmNjY9MR0ei0X9nroZ+0vT0iTtjeLml+tR0jYkLShCQ1Go1oNpul3nD/wUPaN8Pl2885sHtEZdcy\nm1arxVosw3q020jrUfaUy72Sbixu3yjpUH/GAQCU1c3HFu+S9H+S3mj7mO1PSNor6T22n5D0nuI+\nAKBCHc9hRMT1qzx0TZ9nAQD0gG+KAkASBB0AkiDoAJAEQQeAJAg6ACRB0AEgCYIOAEkQdABIgqAD\nQBIEHQCSIOgAkARBB4AkCDoAJEHQASAJgg4ASRB0AEiCoANAEgQdAJIg6ACQBEEHgCQIOgAkQdAB\nIAmCDgBJEHQASIKgA0ASBB0AkiDoAJAEQQeAJAg6ACRB0AEgic29PNn2nKRnJf1D0tmIaPRjKADA\n2vUU9MJYRJzqw+sAAHrAKRcASMIRUf7J9u8lPSMpJH0nIiZW2Gdc0rgk1ev1XZOTk6Xea/70gk6e\nKT1qOpds2aRarVb1GENhcXGRtViG9WiXYT3Gxsamuzml3WvQXx8RT9t+naQHJX06Ih5Zbf9GoxFT\nU1Ol3mv/wUPaN9OPM0Q5HNg9omazWfUYQ6HVarEWy7Ae7TKsh+2ugt7TKZeIeLr4OS/px5Le1svr\nAQDKKx102yO2X3XutqT3Sjrar8EAAGvTyzmMuqQf2z73Oj+MiAf6MhUAYM1KBz0inpT0lj7OAgDo\nAR9bBIAkCDoAJMHnANepmeML+uie+6oeQ3N7r616BAAFjtABIAmCDgBJEHQASIKgA0ASBB0AkiDo\nAJAEQQeAJAg6ACRB0AEgCYIOAEkQd
ABIgqADQBIEHQCSIOgAkASXz0VPRofgEr63XH52KC4lPCwO\n7B6pegRUhCN0AEiCoANAEgQdAJIg6ACQBEEHgCQIOgAkQdABIAk+hw4kM3N8YSg+lz+399qqR9hw\nOEIHgCQIOgAkQdABIImegm57t+3f2v6d7T39GgoAsHalg257k6RvSXqfpMskXW/7sn4NBgBYm16O\n0N8m6XcR8WRE/E3SpKTr+jMWAGCtegn6Dkl/XHb/WLENAFCBXj6H7hW2xUt2sscljRd3F23/tuT7\nbZN0quRz0/kM6/E81qLdsKyHv1r1BM8bivXo0Ru62amXoB+TdPGy+xdJevrFO0XEhKSJHt5HkmR7\nKiIavb5OFqzHC1iLdqxHu420Hr2ccvl/SZfavsT2yyV9WNK9/RkLALBWpY/QI+Ks7Zsk/UTSJkl3\nRMTjfZsMALAmPV3LJSLul3R/n2bppOfTNsmwHi9gLdqxHu02zHo44iX/jgkAWIf46j8AJLEugs4l\nBpbYvtj2w7ZnbT9u++aqZxoGtjfZ/qXtw1XPUjXbF9q+2/Zvit8nb696pqrY/lzx5+So7bts/0fV\nMw3a0AedSwy0OSvploh4s6SrJH1qA6/FcjdLmq16iCHxTUkPRMSbJL1FG3RdbO+Q9BlJjYjYqaUP\nbny42qkGb+iDLi4x8LyIOBERjxW3n9XSH9YN/e1c2xdJulbS96qepWq2Xy3pnZJul6SI+FtE/KXa\nqSq1WdIrbG+W9Eqt8D2ZbNZD0LnEwApsj0q6UtKj1U5SuW9I+rykf1Y9yBD4T0l/lvT94hTU92yP\nVD1UFSLiuKSvSXpK0glJCxHx02qnGrz1EPSuLjGwkdiuSfqRpM9GxF+rnqcqtv9L0nxETFc9y5DY\nLOmtkr4dEVdKek7Shvw3J9tbtfQ3+UskvV7SiO3/qXaqwVsPQe/qEgMbhe0LtBTzgxFxT9XzVOxq\nSf9te05Lp+LeZfsH1Y5UqWOSjkXEub+13a2lwG9E75b0+4j4c0T8XdI9kt5R8UwDtx6CziUGCrat\npfOjsxFxW9XzVC0ivhARF0XEqJZ+X/wsItIfha0mIv4k6Y+231hsukbSryscqUpPSbrK9iuLPzfX\naAP8A3FP3xQ9H7jEQJurJd0gacb2kWLbrcU3dgFJ+rSkg8XBz5OSPlbxPJWIiEdt3y3pMS19OuyX\n2gDfGOWbogCQxHo45QIA6AJBB4AkCDoAJEHQASAJgg4ASRB0AEiCoANAEgQdAJL4F0aLaKKEPOUh\nAAAAAElFTkSuQmCC\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x110e7d6d8>"
+ "<matplotlib.figure.Figure at 0x119132898>"
]
},
"metadata": {},
@@ -914,14 +909,12 @@
{
"cell_type": "code",
"execution_count": 19,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10f311438>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x11ae5a588>"
]
},
"execution_count": 19,
@@ -930,9 +923,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXEAAAEPCAYAAAC0r/QVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3X2wHNV55/HvIwlJVy8oxjgyQbzYvNgmlZhlMchWstzY\njg1sAvxBgrHLXlNxlsJmTRlqyyxxCkHlj01Vki1sixBiQsDlF7J2QHIBCZvFt6jYC3GwZBSDMBQG\ng2wJiCzDvZJAL8/+0TO6fedOz/TMdE+f0/37VKl0Z6bvndPn9Dzz9NNn5pi7IyIicVpQdQNERGR4\nCuIiIhFTEBcRiZiCuIhIxBTERUQipiAuIhKxvkHczJaY2SNmttnMtprZ9Rnbfd7MnjKzLWZ2evFN\nFRGRTov6beDur5nZb7n7HjNbCHzHzO53939pb2Nm5wEnufspZnY2cAuwtrxmi4gI5CynuPue1o9L\nSAJ/5yeELgTubG37CLDKzFYX1UgREekuVxA3swVmthnYAfwfd/9exybHAs+nbm9v3SciIiXKm4kf\ncvf/AKwBzjaz08ptloiI5NG3Jp7m7q+Y2beBc4HHUw9tB45L3V7Tum8OM9MXtYiIDMHdrdv9eWan\nHG1mq1o/TwC/DWzr2GwT8LHWNmuB3e6+M6Mh+tf6d/3111fehlD+qS/UH0X2x9//vXPRRf23O3jQ\nWbDAOXCg+n3s9a+XPJn4McAdZraAJOjf5e73mdnlSUz2W1u3zzezp4EZ4LIcf1dEpBTT07B8ef/t\nFiyAiQnYswdWriy/XWXIM8VwK3BGl/v/quP2lQW2S0RkaDMzsGJFvm1XrEi2jzWI6xObFZqcnKy6\nCcFQX8yl/phr0P7Im4lDst309OBtCoWCeIX0Qp2lvphL/THXMEF8kExcQVxEJCDDlFNipSAuIrWj\ncoqISMRUThERiZjKKSIiEVM5RUQkYiqniIhETOUUEZGIqZwiIhIxZeIiIhFTJi4iEqlDh5JvJVy2\nLN/2urApIhKQvXuTr5dduDDf9iqniIgEZJBSCqicIiISlEHmiIPKKSIiQRlkZgqonCIiEhSVU0RE\nIqZyiohIxAYtp0xMwGuvwcGD5bWpTAriIlIrg5ZT0ivex0hBXERqZdBMHOK+uKkgLiK1MmgmDnFf\n3FQQF5FaGfTCJsR9cVNBXERqReUUEZGIqZwiIhIxlVNERCKmckoHM1tjZg+a2Q/NbKuZfbrLNueY\n2W4z+37r3+fKaa6ISG9NK6csyrHNAeBqd99iZiuAR83sAXff1rHdQ+5+QfFNFBHJT+WUDu6+w923\ntH6eBp4Aju2yqRXcNhGRgamc0oOZnQicDjzS5eF3m9kWM7vXzE4roG0iIgNTOSVDq5TyDeCqVkae\n9ihwvLvvMbPzgHuAU4trpohIPk3LxHMFcTNbRBLAv+zuGzsfTwd1d7/fzG42s6PcfVfntuvXrz/8\n8+TkJJOTk0M0W0Skuzpk4lNTU0xNTeXa1ty9/0ZmdwIvu/vVGY+vdvedrZ/PAv7O3U/ssp3neT4R\nkWEcOgSLFsH+/fkXSga4+2644w64557y2jYKM8Pdu1537JuJm9k64CPAVjPbDDhwHXAC4O5+K3Cx\nmV0B7Af2ApcU1XgRkbwGXem+rdblFHf/DtCzS9x9A7ChqEaJiAxjmFIKhFdOGYQ+sSkitTHMHHGo\n+TxxEZFYDDMzBeIupyiIi0htqJwiIhIxlVNERCI2MzNcJj4xAfv2xbnivYK4iNTGsJn4ggWwbFmc\nK94riItIbQwbxCHekoqCuIjUxrDlFEh+L8YZKgriIlIbysRFRCI27DxxiHeuuIK4iNTGsPPEId65\n4griIlIbKqeIiERM5RQRkYipnCIiEjGVU0REIqZ54iIiEVMmLiISMQVxEZGIqZwiIhKpQ4eSbyFc\ntmy431cmLiJSoWFXum/TPHERkQqNMkccNE9cRKRSo1zUBJVTREQqNcpH7kHlFBGRSqmcIiISMZVT\nREQiNsoccYh3xXsFcRGphVEz8VhXvFcQF5FaGDWIQ5wllb5B3MzWmNmDZvZDM9tqZp/O2O7zZvaU\nmW0xs9OLb6qISLZRyykQ50fvF+XY5gBwtbtvMbMVwKNm9oC7b2tvYGbnASe5+ylmdjZwC7C2nCaL\niMynTDyDu+9w9y2tn6eBJ4BjOza7ELiztc0jwCozW11wW0VEMo06TxzinCueJxM/zMxOBE4HHul4\n6Fjg+dTt7a37dg7aoNdfh/vvhwsvHPQ3JXT33guTk6Of8tbR3XfDD34we/v974ff+I3q2jNumzfD\nxo2zt086CT760cH+xvQ0vOUto7Vj+XJ49dX+223enAT8U06Z/9g3vwlbt47WjkHkDuKtUso3gKta\nGflQ1q9ff/jnyclJJicn5zy+bRt85jMK4nV03XVw882wbl3VLQnPn/wJnHkmHHMMPPYYPPNMs4L4\nV76S7Pd73gOvvJIcK8ME8VEThLyZ+Je+BMcdB9deO/+xG29M9mP1CLWIZ5+d4tlnp3JtmyuIm9ki\nkgD+ZXff2GWT7cBxqdtrWvfNkw7i3czMxHc6I/lobLPNzCTJy9vfnmRyX/1q1S0ar5kZuOgi+OQn\nYdcu+Nu/He5vrFw5WjvyBvFex/LMDFxzDZx88igtmWz9S5jdkLll3imGfwM87u43ZTy+CfhY8mS2\nFtjt7gOXUiB5N43twoLko7HNlr4oF+PFtVEVsf9FZOJ5P3rf61gu4gLrIPpm4ma2DvgIsNXMNgMO\nXAecALi73+ru95nZ+Wb2NDADXDZsg6ank8n2hw4lk++lPhTEs6UDUKzf4TGK9P4vXgzuyfWxxYsH\n+xvjmp3SL4iP87pP3yDu7t8B+n7NurtfWUSD2qcoe/aM991MytVedUXllPnc585xjnGGxKg6Z5a0\n+2CQID7OeeJZ5ZRDh5LFKYZdXWgYweW67Xe3pmUidbd3bxKsNK7zvf46mM0GrKZm4ukgPkwfhJCJ\n790LS5YMv7rQMBTEZSw0rtk6g09Ta+LpLHqYPgghiI+7Hg4BBvH2KUrTTifrTuOaLauU0CRF9EEI\n5ZQiPnA0qOCCuDK2etK4ZuvMQpctS4KBe3VtGrdRyymjrnTfVkQmPu4PsymIy1hoXLN1Zm8LF8LS\npUl9tSk6s+hBM/FRV7of5HnbF6JVTsmg0+560rhm6/bCb1JdvFsWPej+F5UB5zkD2LcvabPKKRmm\np5PBbMoB3BQa12zdAlCTZqjs2ZOceaSz6EH3v6gMOM+bR69jWeUUkk5485uVsdVNe1ybEpgG0S17\na9LFzSL2v6gMOM/zzszA0UfD/v1w4EA57RhEcEF8Zib54hi92OulPa5NCUyDaHo5pYj9H2c5pd3e\nbjNZVBMn6QQF8frRuGZrejmliP0fdzllxYru26qcgjK2utK4ZlM5pZhyShHBM8+K9+32dmujyiko\nY6srjWs2ZeLzA19VmXieFe/b49Wtj
crE0QWwupqehje9aXZ6lsxSTXx+4BumJl5UBtzvufuVUxqd\nibe/AexNb2rOqWRTtL+wf2Kid5bTRCqnhFNOgf4fvW8/V7ftGl9O2bs3mS965JHNyUKaolf20nQq\np4RTToHRM/FGl1P0Qq8vjW02ZeLdyylVzBPP89zpC5sqp3TodZoicdPYZlNNPJx54tD/LCB9YVPl\nlA7K1upLY5tN5RSVU0YRXBBfvrxZp5JNkf6UW1OCU14qp8RbTlEm3qHdAXqh10/7hdqk4JSXyinx\nllNUE+/Q7oBly5KZKppPXB8qp2RTOWV+4EuveD/s3xiWyikjaHdAnk9NSTzS3xfdpOCUR+dK921N\nOmPJmuM9SB+EUE6pYqV7CCyIpwdCL/b6aM//X7iwWcEpj86V7tuadPxnZdGD9EEI5ZQqVrqHwIJ4\nejB12l0fGtdsWQGsSf2UFYAH6YMQyilV1MMhwCDeHkzNJ66PznFtSnDKI6sM0KQzliL6IISP3Vcx\nMwUCC+LpTmhSJlJ3nePalOCUR1YW2qQV70ctpxS10n3bKJn4uC9qQmBBPN0JCuL1kX6RKhOfKyt7\na9KK96Ne2Cxqpfs8z5u+EK1yShedFzaVsdVD+kWqTHyuXi/8JiQyvbLovPtfdAbcK9HYtw+OOAIW\nLYqonGJmt5nZTjN7LOPxc8xst5l9v/Xvc8M2RhfA6knjmq1XAGrCWcuePdlZdN79LzoD7nWM9jqW\nqyqnLMqxze3AF4A7e2zzkLtfMGpjVE6pJ5VTsvXK3ppw1tLrgmTe/S86A+71vOnnWrp0dsX7RYsC\nzsTd/Z+Bn/fZzIpojMop9aRySraml1OK2P9xllPSz2U2N07FXhN/t5ltMbN7zey0Yf+ITrvrSeOa\nrenllF6BL/RySue2IZdT+nkUON7d95jZecA9wKlZG69fv/7wz5OTk0xOTh6+3VlO2bGjgNZJ5VRO\nyaZySu9yyosv5vsbRQbxiQl47bVkxfvOWn3nc6XHqMh2TE1NMTU1lWvbkYO4u0+nfr7fzG42s6Pc\nfVe37dNBvJM+dl9PMzPJuqnQjMA0iOnpZO3RbprwGigqEy8yA16wYHYt2M6x6XyudBunp2eP81F1\nJrg33HBDdntz/k0jo+5tZqtTP58FWFYA70cXNusp/UKdmEjm9R48WG2bQqFMfPT9L+OCYtZzd8vE\n00G8ipp430zczL4KTAJvNLOfANcDiwF391uBi83sCmA/sBe4ZJiGdM4XbcIB3BTpgzv9DZVZGWiT\n6MJm73JKFZk4ZJ8FdKuJl1FOGUTfIO7uH+7z+AZgw6gN6fzUVRNOJZuis+7ZPvAVxHVhM8QLm5D9\nBtKvnNLoj913dkATspCm6HyRNSE45aVyisopowoqiHe+0Ot+ADeF3qCzqZwSXzmlMxOvupwSTBDv\n9Q4nces1LavpVE6Jr5wS2jzxYIK4srX6Ujklm8op4X3svtdzq5zSg8op9aU36GzKxMObJ97rufuV\nUxqdiXd2gFa8r4/O7EVv0LNUE8/e/7wr3odUTlEmnuoArXhfD92+L7oJwSmPrJXu25peTjHL1wdV\nl1NmZqpb6R4CCuLdBqIJp5N1l17pvq0JwSmPrJXu25pw/PfLXvP0QdXllOnp6la6h4CCeLeBUMYW\nv24v0iYEpzz6ZZBNeLMrog+qzsSnp6srpUBgQbyzE5pwENdd1rgqiPd/4Tehn/pl0Xn6oOqa+MxM\ndXPEIaAgrnJKPXWreerNOdEvgDVhxftRyylFr3Tf73mzyilVzRGHgIK4yin1pHJKtn7ZWxNWvB+1\nnNL+zqUFBUeybs/b7UK0yikpWS92ZWxx05tztjwv/Dr3VZ4sut/+l5UBd0s00ivdp7dTOaUl67S7\nrgdwU2SVyfTmnC8A1fmspb3Sfa8sut/+l5UBd4s9va7vqJyCLoDVlcY1W57src7XD4rY/7Iy4G7P\n2+252ive796tTFzllJpSOSVb0zPxIvZ/nOWUbs/VXvH+xReViaucUlMqp2Rrek28iP2vupzS3nbn\nTmXimideUyqnZFM5JdxySnrF+37PpSDeoqlo9aRySjaVU8Itp6RXvO/3XMuXw44dKqeonFJT3bKX\niYlkulbTV7xXJh5uJt7tuZWJ95A1X7TOB3BTdDvD0jdUJlQT75+9VlUT7/bcqon3sGfP/G+6g3qf\nSjZFr1PQpo+tyin9A19V5ZRuz93rWH755YaXU3qdptT1AG6KXmPb9LMslVPqU05J/z9uQQTxrHdk\nTUWLX1b2ojdolVOKKqeEkImDgrhe6DXV6w266WOrckox5ZQQauKgcorKKTWlcko2lVNUTilCEEG8\n12lKXQ/gptCFzWzKxMOdJ97tufuVU5SJd3kXa6943/T5xDFTJp5NNfH++99vxfuQyinBZuJmdpuZ\n7TSzx3ps83kze8rMtpjZ6YM2IqtzNJ84br2+L7rOwSmPfivdt9X5zS5PKaTfivchlVOqWOke8mXi\ntwMfzHrQzM4DTnL3U4DLgVsGbUSvUyKVVOKVNf8f6l0myKPfSvdtde6nvKWQXn0QSjllYqKale4h\nRxB3938Gft5jkwuBO1vbPgKsMrPVgzSi17tp0zO2mPUb1ya/OefNIOvcT0X0QSiZeFWlFIBF/Tfp\n61jg+dTt7a37dnbb+Kc/nX/fz342vhf7wYPJR2S7WbAAVq9OMiQZzv798NJLyc/PPdd7XJ96qvvx\nUIajjkrOCqqU7pvt2/MHsFdfndtPxxwznmP09deTTyKW5Re/yN8HP/4xHHnk/MdefbXcIP7SS7N9\n/8or9Q3iA3nb29Yf/nnx4kmWLJkE4M/+rPv2y5cnA1WUv/gLuPFGWLly/mP//u9w//3w3vcW93xN\n88d/DH/5l7OnnWvXdt/u1FOTsfjWt8pv0969cNFFcPvt5T9XLzfeCDfdNPuCf9e7+v/O8uVwwglw\n5pnJ7d274bbb4NJLy2tn23XXwV//dXnliqVL4eij+2/3znfCH/xB98dOPLG8WvRb3woPPzzb9ytX\nwhveMH+7446DdeuKfe6pqSmmpqZybWvu3n8jsxOAb7n7r3d57Bbg2+5+V+v2NuAcd5+X75qZ53m+\ntA98AK65Bj6YWZUfzGc/mwzEtdfOf+zii+GSS+D3fq+Y52qiT3wCzj4b/vAPq27JrI0bk8C3aVO1\n7bjiCvi1X4NPfnL4v/GpT8E73gFXXllcu7J8/ONwzjlw2WXlP5f0Zma4e9fzr7xTDK31r5tNwMda\nT7QW2N0tgA+r6HKK6rTlqnLV7yyhjGsRfTPOfQlxLGW+vuUUM/sqMAm80cx+AlwPLAbc3W919/vM\n7HwzexqYAQp93y766nyveaW6iDq6MuftDiuUcS2ib8a5LyGOpczXN4i7+4dzbFPayV3RB22/6Ywh\nvNhjVuaUr2GFMq5F9M3y5bBrVzHt6SfEsZT5gvjEZi9FzxNXOaVcIZ6ChzKuKqdIGYIP4srE4x
Ji\n9hbKuBaViY+znBLaWMp8jQziqomXJ8Q6aijjqpq4lCH4IK5ySlxCPAVfujT5oM2BA9W2Q+UUKUPw\nQVzllLiEeApuFsZ38MRUTnEPcyxlviiCuDLxOOzfn3ytwZIlVbdkvqrH9tCh5JOjo366cFz7sW8f\nHHEELBr7Z7plUMEH8SIzj37ZRSi101i13yBD/O6Zqsd2797sb3QcxLj2Q6WUeAQfxIs8aPtlFyqn\njCbk0++qx7aovhnXfoQ8ljJXFEG8qNPHftlF1afcsQs5e6t6bIvqm3Hth2amxCP4IF5k5tHvwKz6\nlDt2Ib/wqx7bovpm6dLkK2LLnmkT8huyzBV8EC/yxdfvFLHqU+7YhXwKXvXYFtU345ppE/JYylzB\nB/EiD9h+2UUo84ljFXL2VpdyCoxnX0IeS5kr+CBe5Ir3/bKLUOYTxyrk7K0umTiMZ19CHkuZK/gg\nXuSK93nqklXXTmOmmni2IvtmHPsS8ljKXMEHcSguO85zilj1aXfMQj4Fr3pcVU6RskQRxIvKPPKc\nIlZ92h2zkE/Bqx5XlVOkLNEEcWXi4Qs5e6t6XGPLxFVOiUcUQbyozEM18XKF/MKvelxjq4mH/IYs\nc0URxFVOiUPIp+BVj6vKKVKWaIK4yinhCzl7q3pcVU6RskQRxFVOiUPIL/yqx1XlFClLFEFc5ZQ4\nhHwKXvW4qpwiZYkiiGueeBxCzt6qHleVU6QsUQRxZeJxCDl7q3pcY8vEZ2bCHUuZK5ogrkw8fMrE\nsykTl7JEEcR1YTMOIb/wx/U93Fliu7AZ8ljKXFEEcZVT4hByOaXqb6iMqZzirnJKTKIJ4iqnhC3k\nle7bqhrbola6byt7P7TSfVxyBXEzO9fMtpnZj8zss10eP8fMdpvZ91v/PldkI4vIPPqtdN+mcspw\nQl7pvq2qsS1qpfu2svcj5GsbMl/f91ozWwB8EXgf8FPge2a20d23dWz6kLtfUEIbCzlo82YXKqcM\nJ+RSSltVY1t035S9HzGMpczKk4mfBTzl7s+5+37g68CFXbYrLQcr4vQxb3ahcspwYsjeqhrbovum\n7P3QRc245AnixwLPp26/0Lqv07vNbIuZ3WtmpxXSupYiMo+82YUy8eHEkL3VJRMve6aNLmrGpahL\nF48Cx7v7HjM7D7gHOLWgv11IOSVvdqGa+HBiyN6qGtui+yY902bVquL+blsMYymz8gTx7cDxqdtr\nWvcd5u7TqZ/vN7Obzewod9/V+cfWr19/+OfJyUkmJyf7NmCc5ZT0ive6Op+fyinZyuib9r4oiNfT\n1NQUU1NTubbNE6a+B5xsZicAPwM+BFya3sDMVrv7ztbPZwHWLYDD3CCe18REcoX/0KFk4eRh5D2l\nLTvLqSuVU7KV0Tdl7ovKKdXrTHBvuOGGzG37BnF3P2hmVwIPkNTQb3P3J8zs8uRhvxW42MyuAPYD\ne4FLRtqDDukV74fNEAbJhsrMcupKmXi2MjPxMigTj0uugoG7/wPwto77/ir18wZgQ7FNm6udeQx7\ncA3yu6qLDy6GF35dauJQ7r7EMJYyK4pPbMLoB+0gp7SaoTI4lVOyqZwiZYoqiI9y+jhMOUXyUzkl\nm8opUqZogviomYfKKeWK4YWvcko+MYylzIomiKucEjaVU7KpnCJliiqIq5wSLpVTsqmcImWKJogX\nUU5RJl4eZeLZYsvEYxhLmRVNEC+inKKaeHliyN5UE88nhrMqmRVVEFc5JVwxvPBVTsknhjdkmRVN\nEFc5JWwxnIKrnJKPLmzGJZogrkw8bMrEsykTlzJFE8Q1TzxsMbzwq1rxPraaeAxjKbOiCeKaJx62\nGMopVa14H1M5RSvdxyeqIK5ySphiWOm+bdxjW/RK921l7YdWuo9PNEF8lMwj70r3bSqnDCaGle7b\nxj22Ra9031bWfqiUEp9ogvgoB+2g2YXKKYOJoZTSNu6xLatvytoPlVLiE1UQH/b0cdDZASqnDCaG\nmSlt4x7bsvqmrP1QJh6faIL4KJnHoNmQMvHBKBPPVlbftGfaHDxY7N+NaSwlEU0QVyYeLmXi2crq\nm7Jm2sQ0lpKIKoiPkokPcmBWNZ84VjGdgo/7wmaZfVPGvsQ0lpKIJoinV7wf1KCniFXNJ45VTKfg\ndSmnQDn7EtNYSiKaIJ5e8X5Qw5wiqqSSX0yn4HUpp0A5+xLTWEoimiAOw58+DnOKqLni+cV0Cq5y\nSm8xjaUkogriw54+DnOKqBkq+cV0Cq5ySm8xjaUkogriw54+qpxSrphOwVVO6S2msZREVEFcmXiY\nYsrelIn3FtNYSiKqIK6aeJhiqqOqJt6bMvH4RBfEVU4JT0wvfJVTeovpDVkSUQVxlVPCFNMpuMop\nvcU0lpLIFcTN7Fwz22ZmPzKzz2Zs83kze8rMtpjZ6cU2M6FMPEzKxLPFlonHNJaS6BvEzWwB8EXg\ng8CvApea2ds7tjkPOMndTwEuB24poa21q4lPTU2V+wRjUsQp+Lj6IpaaeJ7+aNI88bq8VsqQJxM/\nC3jK3Z9z9/3A14ELO7a5ELgTwN0fAVaZ2epCW0r9yil1OTCLOAUfV1/EUk7J0x9NKqfU5bVShjxB\n/Fjg+dTtF1r39dpme5dtRqZySphiOgVXOaW3mMZSElGtpLdyJXzxi/Dkk4P93o9+lPzuoM91333w\nu7872O8N4skn4dFHy/v747JjRzwv/KVLk2+n/J3fGc9yci+8MPixl9fKlfDd7xZ7jL70UjxjKQlz\n994bmK0F1rv7ua3b1wLu7n+a2uYW4Nvuflfr9jbgHHff2fG3ej+ZiIh05e5d0448mfj3gJPN7ATg\nZ8CHgEs7ttkEfAq4qxX0d3cG8F6NEBGR4fQN4u5+0MyuBB4gqaHf5u5PmNnlycN+q7vfZ2bnm9nT\nwAxwWbnNFhERyFFOERGRcI3tE5t5PjBUV2a2xsweNLMfmtlWM/t06/43mNkDZvakmf2jma2quq3j\nZGYLzOz7ZrapdbuR/WFmq8zsf5vZE61j5Oym9gWAmX3GzP7NzB4zs6+Y2eIm90c/YwnieT4wVHMH\ngKvd/VeBdwOfau3/tcA/ufvbgAeB/1FhG6twFfB46nZT++Mm4D53fwfwTmAbDe0LM/sV4L8BZ7j7\nr5OUfC+lof2Rx7gy8TwfGKotd9/h7ltaP08DTwBrSPrgjtZmdwAXVdPC8TOzNcD5wJdSdzeuP8zs\nSOA33f12AHc/4O6/oIF9kbIQWG5mi4AJks+dNLk/ehpXEM/zgaFGMLMTgdOBh4HV7Vk87r4D+OXq\nWjZ2/wv470D6okwT++MtwMtmdnurtHSrmS2jmX2Bu/8U+HPgJyTB+xfu/k80tD/yiOpbDGNnZiuA\nbwBXtTLyzqvKjbjKbGb/GdjZOjvpNe20Cf2xCDgD2ODuZ5DM7rqW5h4bv0SSdZ8A/ApJRv4RGtof\neYwriG8Hjk/dXtO6rzFap4bfAL7s7htbd+9sf8eMm
b0ZeLGq9o3ZOuACM3sG+BrwXjP7MrCjgf3x\nAvC8u/9r6/Y3SYJ6U4+N9wPPuPsudz8I3A28h+b2R1/jCuKHPzBkZotJPjC0aUzPHYq/AR5395tS\n920CPt76+b8AGzt/qY7c/Tp3P97d30pyLDzo7h8FvkXD+qNVInjezE5t3fU+4Ic09NggKaOsNbOl\nZmYk/fE4ze2PvsY2T9zMziW5Ct/+wND/HMsTB8DM1gEPAVtJTgMduA74F+DvgOOA54Dfd/fdVbWz\nCmZ2DnCNu19gZkfRwP4ws3eSXOA9AniG5MNyC2lgXwCY2fUkb+77gc3AJ4CVNLQ/+tGHfUREIqYL\nmyIiEVMQFxGJmIK4iEjEFMRFRCKmIC4iEjEFcRGRiCmIS+2Z2R+1vtr0B63vJ3mXmV1lZkurbpvI\nqDRPXGqttVzgn5Os+Xqg9YGiJcB3gf/o7rsqbaDIiJSJS90dA7zs7gcAWkH7YpIvV/q2mf1fADP7\ngJl918z+1czuan2TIGb2YzP709YCBQ+b2Vur2hGRbhTEpe4eAI5vrSq1wcz+k7t/geQL2Cbd/X1m\n9kbgj4D3ufuZwKPA1am/8fPWAgUbSL46QiQYeVa7F4mWu8+Y2RnAbwLvBb5uZu1VYdpfg7sWOA34\nTutLl44Fi13sAAAA1klEQVQgKbe0fb31/9dIvgddJBgK4lJ7nlz4eQh4yMy2knwLXpoBD7j7R7L+\nROrnQyU0UWRoKqdIrZnZqWZ2cuqu04FngVeBI1v3PQysM7OTWr+zzMxOSf3OJa3/PwT8v3JbLDIY\nZeJSdyuAL7RWRz8APA38V+DDwD+Y2fZWXfwy4GtmtoQk8/4c8FTrb7zBzH4A7CNZtFckGJpiKNKD\nmf0YTUWUgKmcItKbshwJmjJxEZGIKRMXEYmYgriISMQUxEVEIqYgLiISMQVxEZGIKYiLiETs/wME\nThm2LLy88wAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAEKCAYAAADpfBXhAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXu0JHdV77+7u7r7zHSfSUhmkgyTTIaQEF5KEkaSGIUA\ngiHXa1jLoGEpgkvvuBSXIHhdwvWCZqn3svSCD1BuNAgoN0R5Rg1GkCCvEJiEPEmCI5BkyGvynNM9\nc7q7qvf9o6q6+9Spx+/36+quX3Xtz1qz5pzuOtW/02f3rl17f397EzNDEARBWC5qRS9AEARByB9x\n7oIgCEuIOHdBEIQlRJy7IAjCEiLOXRAEYQkR5y4IgrCEiHMXBEFYQsS5C4IgLCHi3AVBEJYQp6gX\n3r59O+/Zs6eolxcEQSglN91006PMvCPruMKc+549e7B///6iXl4QBKGUENG9KsdJWkYQBGEJEecu\nCIKwhIhzFwRBWELEuQuCICwh4twFQRCWkEznTkQrRPR1IrqViO4kot+LOaZFRFcT0QEiupGI9sxj\nsYIgCIIaKpF7H8DLmPkFAM4CcBERnRc55hcBPMHMpwN4D4B35btMQRAEQYdMnTv7c/i6wbeN4F90\nNt8lAH43+PpjAN5LRMQyw0+Ykc9+62HcfvDJ2OeaTg2vO28PjtnaMDr30Bvhb77yXXTX3djnTz5u\nK3567ylG59bhqaNDfOGeR3DJWbvm/lpCdVDaxEREdQA3ATgdwPuY+cbIIbsA3A8AzOwS0VMAjgfw\naOQ8+wDsA4Ddu3fPtnKhErz9k7fj0FofRBsfD8OGE7et4DWGDvjW+5/EH157NwAknv/iH9iJTmu+\ne/2uufUB/M9P3YHzTzseJ2xbmetrCdVByWqZ2QNwFhEdC+CTRPR8Zr5j6hCK+7GY81wB4AoA2Lt3\nr0T1QiaHjw7xyy8+DW+7+DkbHn/yyABnXf5ZrCVE3UrnXh8CAD71xgtw1inHbnju/914H97+ydvR\nXXfn7twPHx2O1yPOXcgLLbUMMz8J4AsALoo8dRDAKQBARA6AYwA8nsP6hArjeiP03RHaMc41fKzX\nN3fu3b4HAOi06jHnrwfHmJ9flfB3CNcjCHmgopbZEUTsIKItAH4MwN2Rw64B8Prg60sBfF7y7cKs\n9AJnF+fcG/Uamk4N3YG58w2datz5OzlcPHTXsYjXEqqDyv3mTgAfCvLuNQB/z8z/RESXA9jPzNcA\nuBLA3xLRAfgR+2VzW7FQGULHHRdZ+487MznENOeex52BKmHEvoi7BKE6qKhlbgNwdszj75j6eh3A\na/JdmlB10pyv/3h9HN2bEDrTdjM5cl9biHP3c+5Jqh1BMEF2qArWEhZLE51705mpoNpdd7GlUUe9\ntlkPsMjIPbxA9WZIMQlCFHHugrWEjnU1wbmvrsyYlhm46KzEn3uROffuuKAqzl3ID3HugrVkp2Wc\nmaLdbt9LlDmGjy9CwSIFVWEeiHMXrCWMZJMccLvlzBTt9vruWPIYZaVRQ40WrZYRKaSQH+LcBWvJ\nitw7zdnSMt2+G1tMBQAimvniobOO6f8FIQ/EuQvW0huEOvf46LrdcmaKdnv99N2ns0otVWDm8e8p\naRkhT8S5C9bS7bto1AktJ0nnXkdv4GI0Mtsv56dlkp37IiL39eEIXrB+idyFPBHnLlhLdz3b+TID\nR4Zm0XvXAuc+fX5x7kKeiHMXrKWXkhMHZteid/tu4u5XILgzmLPDnT6/pGWEPBHnLlhLt+9iNUGH\nDmD8nEnE63ojrA9H6LSSe8F3ZszpqxCu3dfsi1pGyA9x7oK19AYZaZOmeeSeVaz1n5t/WiZc+4nb\nViQtI+SKOHfBWrp9LzMn7h9n4NwzNPThc/NuCRCe/6RtK+j1XUgzVSEvxLkL1tLLzImHkbt+OiNL\nQx8+N2+HG+6APXHbCtwRo++O5vZaQrUQ5y5YS3ZBtT4+Tpes3a/hc0Nvvg437AR54rbWhnUJwqyI\ncxesJUsK2ZkpLZM8CCSk3TS/eKivY5Jzn/drCdVCnLtgJf7OzfQdpLPk3MMe6lkFVWC+PV/Cte9Y\nlchdyBdx7oKVHB16GDESW/ICwNZmHWTY3CvMda+mSCFnkVqq0uu72Nqsj19L5JBCXohzF6ykq1Dw\nJCJ0mmZyxUlBVSFyn6NiJpR7LnI4iFANxLkLVhJGsGlqGWCiaNFF5eIxS9pHfR0eVlvOeCCJpGWE\nvBDnLljJOLJOUcsA5nNUe30XTo3QcpI/AuOC7Rxnm3bXhxsid3HuQl6IcxesREWqGD5vmpZptxwQ\nbZ6fGrKIVEmv76HdqktaRsgdce6ClXQzhmOHmKdlkkfshXSai0jL+IqgUHYpkbuQF+LcBSsJi5gq\nzt1UCplWTPXPHerc56dgCQuqTr2GlUZNInchN8S5C1Yy/7RMet8aAHDqNbSc2nzVMlM95f3fRaSQ\nQj5kOnciOoWIrieiu4joTiJ6U8wxFxLRU0R0S/DvHfNZrlAVxo29UnTugPkovDAdksXqynw7Q06v\nwzTFJAhxZFs34AJ4KzPfTESrAG4ios8y87cix32JmX8i/yUKVSSMYLc2VKSQZmqZncesZB43T4c7\n6Sk/idzFuQt5kRm5M/ODzHxz8PUagLsA7Jr3woRq4zcNq6NWS1azAL4OfuCNMNBs7pU1PzWk3XTm\nJoWM9rdptxysiXMXckIr505EewCcDeDGmKfPJ6JbiegzRPS8hJ/fR0T7iWj/oUOHtBcrVAdl52so\nIVRNy5jm9JXWMAjrCvXxa0nkLuSFsnMnog6AjwN4MzMfjjx9M4BTmfkFAP4cwKfizsHMVzDzXmbe\nu2PHDtM1CxVgTdH5mmz+8ZuSeZlqGf/89bkVVKM95SXnLuSJknMnogZ8x/4RZv5E9HlmPszM3eDr\nawE0iGh7risVKoVq5D4e2KHhgPvuCN6Ile8M5iWFjLZA6LTqopYRckNFLUMArgRwFzO/O+GYk4Lj\nQEQvCs77WJ4LFaqF79xVImv9FgFr62oyy/CYeaVloqP+2k2J3IX8UFHLXADgdQBuJ6JbgsfeDmA3\nADDz+wFcCuBXiMgFcBTAZSzDIIUZ6PY97Dp2S+ZxJgM7VOanTp9/Xg432j+n3XJwdOjBGzHqGYVk\nQcgi07qZ+csAUi2Nmd8L4L15LUoQsuanhpjMUVXpCBnSbjk4MpiPw+2OO186G/7vDVxsW0nuMy8I\nKsgOVcFK1NUy+qPwdCN3YD493bvr/jSocKNW+P88u1AK1UGcu2AlOlLF8HhVVPvWTB8zj9RMbxDq\n3Otzfy2heohzF6xj6I3Qd0dzc75dxUEg/vnnNyS723fRqBNaTqhzl86QQn6IcxesI6r/TqNRr6Hp\n1MYbgvI+/+TOIH+JYjT1FBZWZY6qkAfi3AXrmHSEzI6s/eP0WgSo9oqfPmZekfv0pCmZxiTkiTh3\nwTqiPVey8Eft6aRl1Eb4AWY5fVV6k
bpCR3LuQo6IcxesQ7WXe0in1dBKm/T6LrY260rSxnk63HDE\nXkh7jsocoXqIcxesQ0eq6B+nF7mH049UmGeqZK3vojOlZ18NpJBrIoUUckCcu2AdOgXP8DidaFdl\nfmrI/NMyk8i95dRQr5GkZYRcEOcuWMeaZuSuO0dVtW8NAKw0aqjRvNIyGwuqRIR2U+8uRBCSEOcu\nWIdu5N7RbLgVVamkQURz6wzZjdmFK3NUhbwQ5y5Yx8S5q0XXus43qlLJYh6dIZk5dh3S013IC3Hu\ngnV0+96GnZtZ+H3QXYxGao1I4yLmNObhcNeHI4x4892Jbv1AEJIQ5y5Yh3ZkHahMjgzVovde3x3/\njNL55xC5r/WDpmGRu5NOyxG1jJAL4twF61DtCBmiu4tUtSlZyDyce5hGil5kZI6qkBfi3AXrUJ2f\nGqIjV3S9EdaHI+WCKqC/A1aF6KCOyWuJcxfyQZy7YB3akXtTPXKPttlVOv8c1DJJu3DD+oEgzIo4\nd8E6TNMyKk5Rd/dreGz+aZl4uadfUPUgUyqFWRHnLlhHV3HEXojOqD1dDX14bK/v5upwk0b9tVsO\nvBGj745yey2hmohzF6yj1/e0c+IA0A0UKGno7n4Nj3Vzdri9yPzU6dcCpO2vMDvi3AXr0E3L6AzU\nMIrcm/lPSAovRNHc/zjFJHJIYUbEuQtWwczoDtxxh0QVQjmhUkHVJHIPOjfmqWIJL0TROxSJ3IW8\nEOcuWMWRgQeO2bmZxpZGXbm5VzchHZLGPGab+k3D6qhFesrLwA4hLzKdOxGdQkTXE9FdRHQnEb0p\n5hgioj8jogNEdBsRnTOf5QrLjknaxO+mqKZo0e1bM72WPOWQSamn8UBuaUEgzIjKJ8gF8FZmvpmI\nVgHcRESfZeZvTR3zKgBnBP/OBfCXwf+CoIXu/NQQ1c0/SSqVrHMDeadl4jdqzXMgt1AtMiN3Zn6Q\nmW8Ovl4DcBeAXZHDLgHwYfb5GoBjiWhn7qsVlp5eQi46C38XqVpB1akRWo56RnIeefDkyF3SMkI+\naH2CiGgPgLMB3Bh5aheA+6e+Pxg89uAMa9vE+64/gO892ht/f+5px+PSF56c50ssBTfd+zg++vXJ\nn6PdcvCbP36mVp65KHTnp4Z0Wg5uvu8J/Pd/uDX1uFvufxLtlgOi7PmpIaHD/fAN38MXv31Ia11J\n3PHAYTxzRzvxtT76jftx871PABA7t42BO8K7/uVuHD4aL73decwKfuMVz0q1scPrQ/yf6+7BkUF2\nQNJ0anjTy8/ACdtWtNap/Akiog6AjwN4MzMfjj4d8yObdnwQ0T4A+wBg9+7dGssEjgxc/NF192B1\nxcFqy8GTR4f46n8+JkYfw0duvA+fvuUBnLjawsAb4dHuAC999gl4ybN2FL20TExy7gDwkjNPwMf2\n34+vHHg089iXP/sErXPv6LRwzu5j8f0njuL7TxzV+tkkGjXCj56x+e+x2nJw/mnH497Hejh0eF3s\n3ELufugwrvzyd3Fcu4mVyB1gb+DhqaND/Nx5p6Y6429893F86IZ7sb3TQrOefBHwmPHw4T5+8ORj\n8DM/pOczlT5BRNSA79g/wsyfiDnkIIBTpr4/GcAD0YOY+QoAVwDA3r17tbb7hbrf37ro2Xjdeafi\nnZ++A5+6ZdNLCAD67ginHr8Vn3/rhbj7ocO46E++VJrb/LCQqNOSFwDe8opn4S2veNY8loSmU8Mn\nfvWCuZw7Sq1GuGrfeePvxc7tI/RFf/Gz5+C8047f8Nwnv3kQv3H1rej2XaSFEOEd6tW/fB6euaOT\neNxTR4Z4weX/alSDUVHLEIArAdzFzO9OOOwaAD8fqGbOA/AUM+eakokW2uaxJXxZGLojNOv+nzbM\nXZdFN22alllWxM7tI22X86SJXbozVrXzsXrK4POr8gm6AMDrANxORLcEj70dwG4AYOb3A7gWwMUA\nDgA4AuAXtFeSwVjlELx57akt4SsNPWXFsjPwRmgGt4vjYmBJdjyG69RNyywrYuf2kZY6DD9vaxmt\nMFTt3KnX0HJqRsFZ5ieImb+M+Jz69DEM4I3ar67B+EoX3K6HOxi7fVeMPsJgOnIvmfoiXOdW+ZsC\nEDu3kbRdzpPd0umRe6/vgkjNzldXzLqSlmaHarTRkk4P76oxnIrcm04NTaeGbkk2xXT7XuzOzaoi\ndm4fabucVYOpbtAcT8XOTQe4lMi5b7yN0enhXTUG7giN+uRPW6bRbbpNw5YdsXP76PVd1AhYaWx2\nn6p7Inw7V7sTazeX3LlHCxA6PbyrxsDjceQOqG/wsYHuQG/E3rIjdm4f3SAAidOxK0fuA/UgxnRY\nTGmc++bI3byKvOwMXG+ccweg3HfFBnp9V1sGucyIndtHL2XGb5hDz/p7pZ0jimlwVjrnHr550ho1\nmWEkci9dWkaz9cAyI3ZuH92U1GGtRmg365m6dB07X/qc+1pwpQsLEJKLTMbPuU9uGdtzmAE6L7p9\nT3LuU4id20dS07eQzoqTORVsbV0vLbO2zM49WoDQGdBQNaZ17kBobOV4n7r9oXZHyGVG7Nw+slIq\nfqSdEblrDKQxvfMukXPfGNGVbeflIvF3qE5dCA2r7UUQ/TtXHbFz+/BtNDkAUSmAZp1jmnbLwZGB\nh9FIb5dyaZx79FaoXiNsadRL47QWSd8boeFsTMuURW2RdctbNcTO7SMt5w6oSRezzjHNWDGluVel\nNM49rgDh55LL4bQWBTNj6I3Q2qBzr6M3sL8/ydAbYeCOJHKPIHZuF70MuW5WjSu0845GQRXQl8OW\nxrnHXek6LYloorgjBjM2bGJqtxwwQ6l3dJGYDK+uAmLn9sDMmRvtwmAqCd221m3DGb6lce7+1XJj\njspUIrTMDL0RAEQ2MZWjKCcdIeMRO7eHvjvC0OOZCqq6dm46NL08zr3vbdrcUiaJ36IYuL5zj7Yf\nAOwvyo1H7Ilz34DYuT2o3F1mqdN07dxUDlsa596N0YWabstdZkLnHhe52/5ehdpgVRVBVRA7twcV\nx9xpOhi4o/FnMYqunZsGZ6Vw7gN3hIG3uQBRpp2Xi2IQk5YpS+Se1m2vyoid20N0aFAcWWnQ0M51\ndO5p50uiFM49qQAhKoLNjCP3mLSM7XJI0/mpy47YuT2EhdL0gmp6MKVfUF1i555UgBAVwWaGni93\njHaFBKSgWlbEzu2hq+CYx844QTETnSqXxeRisYRSyKSrZbvl4OjQg6e5c2uZKXdBVZx7HGLn9qBi\no1nBlK6drzRqqNGSRu6T25iNeS7TnVvLTFzOvSxSSEnLxCN2bg8qs0+zIm1dOyciI8VUKZx7UgGi\nLE5rkUwi90n7ga3NOsjgyr9oun2/D/30hUkQO7cJldRhVrM3Ezs3UUyV4lOUdLUcS/zWxehDwsi9\nNWU4RIR206xt6CLRGT1WJcTO7WEshWymqGWa6X+vbn+obecmG9lK4dx7CQWIjuG23GVmOFbLRH
fz\n2l+U02mmVCXEzu2hN3Cx0qjBqSe7zmy1jH7n0+WN3BPVMg0A9kv8FkkYuU93hQRCrbTd75N0hIxH\n7NweVGw0W+eub+cmex1K4dyTde4S0UQZ95aJRBZl2OWY1ZCpqoid24OKjTadGpr1GroJBXCd+akh\nJnNUM507EX2AiB4hojsSnr+QiJ4ioluCf+/QWoEC3YEbW4Aw3bm1zPRjpJBAOZpPmRh9FRA7twfV\n2adpaVCTIGZeapkPArgo45gvMfNZwb/LtVagQFKhLWuzQBUZxhRUgXI0n5K0TDxi5/awtq5mo2md\nIY3TMnkP62DmLwJ4XOusORPXERIoz+acRRK3iQkwM45FozN6rEqIndtDb+DG+qIoaWlQEztvtxx0\n1/UG7uSVcz+fiG4los8Q0fNyOueYbsKtUMupoV4juV2dIq4rJGCWs1s0knOPR+zcHlSVLmkFUBM7\n77QcuCMep11VyOOTdDOAU5m5S0QXA/gUgDPiDiSifQD2AcDu3buVX6CbcCvk67frov+dIm5YBzC5\n8tsKM6ObMb6sqoid24OfUsmOutstB08cGWx63NTOTeouM0fuzHyYmbvB19cCaBDR9oRjr2Dmvcy8\nd8eOHcqv0RskX+lWVxrSMW+KMHJ3ahulkKstBwMvucd00RwZeGCW1gNJiJ3bgWpBNSktE9q5vlpG\nv7PrzM6diE4iIgq+flFwzsdmPe80aQWIMmzOWSQDj9F0agj+JGNs38IufWXSETsvntGIcWSglpZJ\n+nuZ2rnJRrbMVyCiqwBcCGA7ER0E8E4ADQBg5vcDuBTArxCRC+AogMtYJ+uvQNq29HYJCoWLZOCO\nNmncgY3TmJ7Wbi56WZmERrsqzj0WsfPiCd//WdQypm2tTRRTma/AzK/NeP69AN6r/IoG9PreeJde\nlDJszlkkQ28U25DI9s6CMj81HbHz4hk7ZgW1zGpwMWbmDXfRpnZuMirT+h2qzOzLj5Ii96b9m3MW\nycAdbegIGWJ7WmYyBEGkkHGInRePTkql3XLA7OfYpzG187EcVqOobr1zzyq0pW0WqCKDxMg9zNnZ\n+V7JoI50xM6LZzLjV00tA2wOpkzt3CQ4s965Z4216rTqWFsfLnJJVjPwRps2MAH2t41VmU1ZZcTO\niyepO20cofOOttlWGdMXe77mEqZlsgoQfqHJ09q5tcwkFlSb5UjLSOQej9h58eg45qRI27ygGo7u\nW6AUct5k5bk6Kw48zZ1by8zQG23qKwNMpljZWpRTGV9WZcTOi0cnpZLUydM0LePUa1hp1LQEEdY7\n96wChPTd2IhfUE1Oy9gauff6LoiArQ0pqMYhdl48OgXVyY7SjZH22M5TJjmlnXOp0jLhm7OaIIW0\nPd2waJKkkI2gZXJSj+mi6fY9tJsOarXNSh9B7NwGwvx5dJZzHEntAkI7j24yVEG3bXcJnHt65G6i\n/1xmkiJ3wGyay6KQ+anpiJ0XT6/vol6j2LRnlKQ7rVnsXFcOa71zzypAJN3+VJW+Gx+5A3Z3huym\n9A8SxM5toNf30G7WlaLuxILqDHbeaTlYWyade1aea1JFlogGCNIyCZF7u2nvLkeZwpSO2Hnx6AzZ\n2Nqsgyhe525q5+1WffkKqmkFiCQ9aVVJ2sQEBAUZW3Xuit32qorYefHo9GH32zQ7m3Xu6+Z23llp\nLJcUMhzUkXQrFPZ5kIjGZ+hyYuTeWbG3+VRXcQhCVRE7L56u5pCNuM6Q3b7aJKc4Oq36sqll0gsQ\ntkv8Fs3AG6HhxF8IbZ6j2u0PlVQIVUXsvHh0UypxLSN6MwykWbqCqt8RMvnNaBtsy11mhu4IzXpC\nCsvi5lMyPzUdsfPi0R1svRoTTM1i5+2Ws6kRWRrWO/esN7ReI2xpyCCDkH5G5G6r2kL3lrdqiJ0X\nj+r81JA4Xfosdq4b8Vvv3FWKGH66wU6ntUiYGQN3hFaizr0+7jFtE8Ng/F9HCqqpiJ0Xi+r81JBo\nGnRWO9e9KFjv3FWudB0ZQQYAcEe+007axJTUY7poZMSeGmLnxcHMWmoZINg0OCVgmNXOddM5pXDu\nWbcjNhcKF0k4/Dp5E5OdeVvpCKmG2Hlx9N0R3BFrq2WmpcfhBiRTO1/StEz6FUuM3mfopTt3W5tP\nyYg9NcTOi8Okm2O0xjXrzIIldO7ZRYxVi3umLJIwck/rLQPYJ6fr9v0hFKb636ogdl4cJgFIp+lg\nEOTZ/XOoz2CNY6ly7gN3hIE3wqpCWkaM3te4A2VMy6iPL6syYufFsRYGIDrOPbLxbFY7X6rIXbUA\nISoCn3HOPTNyt+u9koKqGmLnxdEbO2a9tAwwCaZmL6gukXNXHWslKgKfoeerZdK6QgI2pmXUZ1NW\nGbHz4shqPR7HOJgahJH7bHa+XJH7QK2I0W45ODr04I3s0m8vGtWcu21pGdPRY1VD7Lw4TBRd0ZYR\ns9r5SqMGnVk2mc6diD5ARI8Q0R0JzxMR/RkRHSCi24joHPWXT0f1NiZ6hawqA8+/dczKudsW/Ula\nRg2x8+IwsdHOeI6qZ3yOaYhI62dVIvcPArgo5flXATgj+LcPwF8qv3oGE11othQSgLXtbBfFwA3S\nMgmRe9hj2rbIvdv30AzGAArJiJ0Xh2qKeJro32ut785s51nikmkyX4WZvwjg8ZRDLgHwYfb5GoBj\niWin8gpSmBQx4uenhtgq8Vs0E7VM/L0bEaFj4cCO3gxtUKuE2HlxmBRUo3+vPOxc5+KSxydqF4D7\np74/GDz2YNoPfffRHl535Y2pJ37kcB9AdhEjfBN/6+O3xb75z9zRwTv/63NTx2Pd//gR/N4/3ol+\nkLdOY0ujjt9/9fNxwraVzGMXyXCslklvkfyZ2x/CgUe6AIA9x7dx+SXPMxrYmxddmZ+qxDzsvEaE\nX3vZ6fihPccpr+PAI2u4+hv34+0XP6dQu5mm23fxB//8Lbzt4udg20p6MGh2/iG2NOqoayS9w7/R\n//3if+Ifb3sA3354bWY7zzstk0Xcbxtb8SGifUS0n4j29wdDdPtu6r+trTouet5JOCnDiT5/1zH4\nkdO3A8Cmc3znUA8f/Or3Mp32Dd95DJ+76xE83hukrunQWh//+q2HcfN9T6i9OwskjNyTukICwE/v\nPRk7j10Zvzd/+7V70Su410w4kEVIZx52/uUDj+Iztz+ktY7r7nwYf/Wl7+Kx3sD4d8mbb973BK76\n+v246d75fC5NhslsW2ngkrOejm1bGuj2XTz92C34qXNOnmkdz9+1TfnYPD5RBwGcMvX9yQAeiDuQ\nma8AcAUA7N27lz/5qxfk8PLAjtUW/u6Xzo197kNf/R7eec2d6PVdrDSSr5rhrdPf/eK5eFq7mXjc\nfY8dwYv/6Hor9cbj9gMJOXcAeMsrz8RbXnkmAODvvnYvfudTdxQ+v7To1y8L87Dzc//wc9ppnuk0\nw/ZOS+tn50VUkTKP8+tuPqrVCH962dm5ruP3X/0D+
APV18/h9a4B8POBauY8AE8xc2pKZpFMFCLp\nzlh9w5SdWnEA46gtSQoZxRZppG63PWEzpnbebjnoaqpvJjsu7fkMRBUpeVNGG81cLRFdBeBCANuJ\n6CCAdwJoAAAzvx/AtQAuBnAAwBEAvzCvxZowkSOl/9FVFRu2buEHJjr3lmI13hZpZLfv4uSnbS10\nDWXH1M47Bi0NJo7UnrvX6Bb/vCnjMJnM1TLzazOeZwBvzG1FOTN2YBnRiUr3ScB3nE6NCneIcWR1\nhYzSVnQI80ZG7M2OqZ3rzuUMzzH9vw1Et/jnTW/g4oRVuwQUWSy9sFg10la97Qo3Ethk2CFZO1Sj\n2NJrxs9n5q9wqBKmdm7Srya6nd4G5p9z1y+oFs3SO3dVbfCaRlGv03KwZpFhh+hH7sWnZZgZ3YF+\nsUrYiKmdm/SrmXeUbEK3P98Lztp6+Yr+S+/cVXf16RRM2pY2cAojd0dRixsaa5EXqiMDD8zSemBW\nTO3cZABI+Bo2Re7zdu4mapmiWXrnHg6jVbld1Ynci05lxDHwGE2npryxxIYdj9JXJh9M7bxj4Nxt\nVMvMMy3jjRhHh5KWsY6JdDHdGavMap2c074t/IAfuadp3KOEvWaKdO4yPzUfTO2803IwcEfjlJ4K\nNqZlwt8vbsfrAAAU4ElEQVR7Hp9L1e60trH0zt2p17DSqCmoCNQVGybysUUw8DytpkREhHbBvWZk\nfmo+mNq5bt2Fmcc7mm3ayDe54OS/prLeXS69cwfUbj31cu52Ovehy1qRO1B8/WDSba9c+UwbMbFz\n3Y1sfXc07idv02dgnmkZce4Wk+WMJ4oN9Zy7lWkZb5TaVyaO6IT2RRP+XVZFCjkzJnauurM1ZNru\nbXTu8/hclnXGbzWce8ZGDV3FRrtVR7fvwt+/ZQ8DTy/nDhR/oZLIPT9M7Fx3I9v0+W0KcNbmGLmH\n6qCyNberhHPvtJzx4I84dMdfdVoNjBhYH6oXoRbBwB0pb2AKscW5l61YZSMmdq6blpk+vy3OnZkn\naZmBh1HOYwjHNlqymQOVcO7tVj210KTrYFT7eCyaoTdS7isTUnT9oKz5TBsxsXPdgmp43PHtpjVp\nmfXhCCP21wTkP4awrDN+K+HcOyuN1JyirmLDhp2dcZQxcu/1XRD5skxhNkzsXDdyDx3nCdtWrFHL\nhGsPh+fkXUMKf+eyBSDVcO5BjjwJ3byvrZ0hB+5Iez5j8WoZD+2mY81EnzJjYue6G9lCh37itpY1\nwU24jhO3+b3l8/5cljV1WAnnnlVo0s+52xm5Dz0T5168WkaKqflgYuemaZmTtq3g6NAbyyKLpDu1\nJiD/z2Wv76JeI+2UZ9GUa7WGtFsOjqQUWnRvu1Tbqy6avklapulg4I3GfWkWjY4EVUjHxM6bTg3N\nek05xTKJklc2nLNINq0pd+fuod2sl+7ushLOvZPhjEMFgG5BNU2ZUASmkTtQ3F1It4Td9mzF1M51\nUnOT/LafArHh7rUbce55p2XK2BESqIhzz9qoYSKFTDtfURjp3FeKrR+UcXyZrZjauY5iqtd3saVR\nx+pKY/x90XQjOfd5qGXKaKMVce6hdHEY+7yuYsPWOaom7QeKnqNaxvFltmJq5zrzCcK/l013r71x\nkTeI3HNeU2/glk7jDlTEuU8cWHxEo6vYaCu2V100pu0HgOIuVD3JueeGqZ3rRO7dvodOqz7+DNhw\n99qLpIrylmjqdIy1iUo59yQD1lVs1GqErU37Bnb4LX/1lCdFb8iS+an5YWrnOl1OwxSFTXLgcA3H\nt1uozaGFda/vlq71AFAR555liN2Bfmqg3XKsUApMM5ipoFpMBCZpmfwwtXOdjWxhFLu6Yo8c2He+\nddRrNJdZC2WcnwpUxLlnRTQmio2sPh6LhpmDyF0zLdMs7kM6cH0J5moJPzg2YmrnvlpGXQrZmYrc\nbQhwpgOEecxaWFsflq4jJFAR556VVza57Sp6Z2cUN9A260buRRZUpa9Mvpjaua5axi+o2pWWCdeT\n9x11OJykjDZaCeeeXWjSr4bbNkc13ISku4mpyIJqV5x7rpjaeafloDtQa2HdDVIULaeGeo2sCHCm\npYp+Wia/z2U4nKSMNqrkCYjoIiK6h4gOENFvxzz/BiI6RES3BP9+Kf+lmrPSqKFGKRIxA8VG0Q23\nooQzMHUj96ZTQ9OpFRO5l3Q2pa2Y2nm75YDZ7/eeRbfvpyj8EY313GWHJkwX5TutOrrr8b+/CeHn\nYnUZpZBEVAfwPgCvAvBcAK8loufGHHo1M58V/PvrnNc5E0SU2kPFRLFhW0HVNHIHirtQSVomX0zt\nXPXuzfVGWB+ONuS3begMuSEt08z3jnpso0uqlnkRgAPM/B1mHgD4KIBL5rus/FlNcWAmio2i+6BH\n6btmkTtQXP2grOPLbMbEzldbavnzcDD2hvy2BZ+B3sCNXHDyW1OZU4cqnmAXgPunvj8YPBblp4jo\nNiL6GBGdksvqciTJEEPFRkfzymxrWsakc127WUwEJpF7/pjYuaocNtq+oLNix91rbypyz3tNvf7G\nC1qZUPEEcdq6aOXlHwHsYeYfBPA5AB+KPRHRPiLaT0T7Dx06pLfSGUnSv5o6mHbTwfpwBNezY9Te\nwJstLVNI5K7ZsE3IxsTOVeeoRs9hS4Az3dgr77uJsH5Rxo12Kp7gIIDpSPxkAA9MH8DMjzFzP/j2\nrwC8MO5EzHwFM+9l5r07duwwWa8xSQ7MtBH/pL9M8TlHwO8rA0C7twxQXP2grEMQbMbEzlXnE0TP\nkdU/fhG43gh9d2MdYOgx+m4+n8vukkfu3wBwBhE9g4iaAC4DcM30AUS0c+rbnwRwV35LzIekjRpj\nxYZmNTysnnctuC0FgIHn/24Ng7RMZ0UKqsuCiZ2rbkiKjukretALELOmZr5BV5ltNHPFzOwS0a8B\nuA5AHcAHmPlOIrocwH5mvgbArxPRTwJwATwO4A1zXLMRuadlCm64FWUwQ+TeKSgC6w5cNJ2aUSpJ\niMfEzlU3JEXH9GWN9VsE3bGc1l/T9OfyuGBg9iwstXMHAGa+FsC1kcfeMfX12wC8Ld+l5UtSftBU\nsWFT4yRgknNvanaFBAKHUIBeeboQJuSDiZ2PbTnDBjalZYLXYubCphTF1QGA/FoRlzl1WJmQKcxF\nRnfhmV6ZbZujGurcdbtCAv4Hvpcynm1eSEfI/DGx862NOkihm2L0HO2WA2/EYxluEUSlinn3vAmH\nk9Rr5RqxB1TIubdbDtwYQxwbh25vmQIbbsVhukMVmHwgjgwXmz/tlrSVqs2Y2HmtRkpy2GgUa0N/\nmag8M+876m5JO0ICFXLuSZG2qRwv79u/WZnsUDVLywCLv1B1191Sbuu2GVM7V9nI1uu7cGo03kth\nw91r9PfKuxWxv/u1nHeXlXHuSRs1jNMyFvWzBqZz7vp/0tWC5qj2DProC+mY2nk7aB6WRtigK8yv\n21B3iqsD
APl9Lss6PxWokHNPmjgUKjb0h1wEkiuFZkuLYJJzN9uhChQQuZf4g2MrpnauspHNH7E3\n+Xt1Ei4ki2RTQXU8AjMvnXt5bbQyzj2p0GKq2Gg5dTTqZI1aJo+c+8Ij976r3fZBSMfUzlU2JEXH\n9NkwKD4MrtpjKWS+ayqzoqtyzj0q95pFsWFL4yRg9q6QQP5T47Mo6/gymzG183Yre7JYNIod150K\nTss06oSW4/9uTr2GVo4trCUtUwKSKvuzKDZ8hYFdzt20KySw2JFp/oSb8harbMXUzn05bLZzn45i\nbdjIF+d88+x5E01FlYnKOfeoIc5y21VUw604ht4IRIBjoMfNmuAzD44MPDCXc+efzZjaeWclu5VA\ndEyfDc497qKV5x11T9Qy9pOUV56lYNK2YPt1SN8boVGvGe0ULOJDOlY5iBQyV0ztPKltwTS9/sYx\nfTbo3OPktHkFXa43wtFheVOH1XHuCQ2FTOanhnRWGlZMogH8rpAtwx4tW5tqOxTzpMzbum3G1M47\nTQcDdzQuzMcRTcvUa4QtjWIHxcfJafNKy0SHk5SNyjh3p17DSqMWryIwzLl3CppgFMfA84w6QgL+\neLbOgusHZR5fZjOmdp519+bXSDYXZfMeSK1L3A7SpM6YupS5aRhQIecOxF/RZ1Fs2NDPOmTospHG\nPWTRyp8yjy+zHRM7z0qx9N0RvBHHRMkFR+4xOfG8bFmce4mI/tFnVWyo5CkXxcAboWHQETIkr2hH\nlTKPL7MdEzvPGrWXlEYrWg4cLfIC+aVlJr+zFFStp93c2Np2VsVGUge+Ihi4o5ki907LWaheeRIV\nlfODYzMmdj4ZtTeMfT48X5wypWid++a0TE4593AQSElTh5Vy7tGJQ7OmBtotByMGji64m2IcA2+E\npmPuKBcdga0lRILC7JjY+aS/UHrkHle8LCpyZ+ZYiWe75eBIDi2swwtdWRVd1XLurY2zQmdVbCT1\n8SgCP3KfJS2z2A/puFVrST84NmNi51kF1fDxeckOTTg69DDizTa0mlNP9zLPTwUq5tyjMx+jvaB1\nmXSGLD5yH3ojo92pIasLrh/0+i5qBGxpSFomb0zsvN1ML6iGjjI+BVKM/SfdTWTVD1SRgmqJiM58\nnDktY9HAjoE7mmkWaRFqmXbTKWw82zJjYudZvdmTxvQVqZbpJaypndMdddn3YlTKuUeli7MqNmzY\noRcya+S+6En2ZW7IZDsmdq6alomLko8OPXgLHtG4YU0xapnp52c5f31qOEnZKOeqDYkWWmZVbNjQ\nWyOkP2Pk3mnVMfBG4wZk80bmp84PEztvOjU067XEFEuSc+/klN82IU2eCeTj3NvNemnvLivl3KOG\nOOttlw2TaEIGOUTuwOIuVNGt7EJ+mNp52qi9pBmsRQY4WRec2dMy5e0ICVTMuUed8aw5d+vSMjPm\n3IHF/S5lnnBjO6Z2nqYP76672NKoox7pOprUP34RZBVUZ3fuw1LbaKWce3Tuaa/vgshvnJXH+Ypk\n1k1Mqwt27mWecGM7pnaetrOzN4hvPLZou5kmfM04eSaQR1rGK7VUV8kbENFFRHQPER0got+Oeb5F\nRFcHz99IRHvyXmgeTHTpXvC/30zJNKe2tbHxfEUy9FjSMgIAcztP06wnpSjykh2akJ2WmW1NZbfR\nTG9ARHUA7wPwKgDPBfBaInpu5LBfBPAEM58O4D0A3pX3QvMgKl2cVbFRqxHaTTs6Q+YhhQQWG7mX\n+ZbXZkztPE0OG52fOvmZ4jbyhc57a2SvxEqjhloOLazj+taUCRVv8CIAB5j5O8w8APBRAJdEjrkE\nwIeCrz8G4OVkYYk56sDyUGwU3TgpZNaC6qIn2cv81PlhaudpaZmkMX15pUBMCNUstUgdgIhy6S9T\n9gBEZeW7ANw/9f1BAOcmHcPMLhE9BeB4AI/msci8CA3xnZ++E3983T144MmjOP2Ezszn/OfbHsRN\n9z6RxxKNmb39gP/hv/yf7sSffO7beS0rFoZ/MSprtz3bMbXzdquOex87gle8+983PXff40fwI6dv\nj/kZ/7X+97/cjff/+3/OuHI9Hj68nuh8Oy0Hn7j5IL5ywNwFPXR4vdQ2quLc4zxGdMeCyjEgon0A\n9gHA7t27FV46X045bitef/6pONTtAwDOOLGDVz73pJnOue/Fp+GL/3Eoj+XNxJknreKi5+80/vmn\nH7MFb/jhPXhkbT3HVSXznJ3b8OPPm+29F+IxtfNLX3gKen0PvPmjizNO7ODSF5686fHj2038tx99\nBr7/5NHZF67JGSd28KI9x8U+96sXPhM3fOexmc7/rJNW8eqzd810jiKhrHa1RHQ+gN9l5h8Pvn8b\nADDz/5o65rrgmBuIyAHwEIAdnHLyvXv38v79+3P4FQRBEKoDEd3EzHuzjlNJ0n4DwBlE9AwiagK4\nDMA1kWOuAfD64OtLAXw+zbELgiAI8yUzLRPk0H8NwHUA6gA+wMx3EtHlAPYz8zUArgTwt0R0AMDj\n8C8AgiAIQkEolYKZ+VoA10Yee8fU1+sAXpPv0gRBEARTKrVDVRAEoSqIcxcEQVhCxLkLgiAsIeLc\nBUEQlhBx7oIgCEtI5iamub0w0RqAewp5cTvZDsvaNRSIvBcbkfdjI1V/P05l5h1ZBxXZFecelV1W\nVYGI9sv74SPvxUbk/diIvB9qSFpGEARhCRHnLgiCsIQU6dyvKPC1bUTejwnyXmxE3o+NyPuhQGEF\nVUEQBGF+SFpGEARhCSnEuWcN3F5miOgUIrqeiO4iojuJ6E3B48cR0WeJ6D+C/59W9FoXCRHVieib\nRPRPwffPCIat/0cwfL1Z9BoXBREdS0QfI6K7Azs5v6r2QUS/EXxO7iCiq4hopcq2ocPCnbviwO1l\nxgXwVmZ+DoDzALwx+P1/G8C/MfMZAP4t+L5KvAnAXVPfvwvAe4L34wn4Q9irwp8C+BdmfjaAF8B/\nXypnH0S0C8CvA9jLzM+H33L8MlTbNpQpInJXGbi9tDDzg8x8c/D1GvwP7i5sHDL+IQCvLmaFi4eI\nTgbwXwD8dfA9AXgZ/GHrQIXeDyLaBuDF8GckgJkHzPwkqmsfDoAtwYS3rQAeREVtQ5cinHvcwO3y\nDiqcASLaA+BsADcCOJGZHwT8CwCAE4pb2cL5EwC/BWAUfH88gCeZORxfXyUbOQ3AIQB/E6Sp/pqI\n2qigfTDz9wH8MYD74Dv1pwDchOrahhZFOHelYdrLDhF1AHwcwJuZ+XDR6ykKIvoJAI8w803TD8cc\nWhUbcQCcA+AvmflsAD1UIAUTR1BXuATAMwA8HUAbfjo3SlVsQ4sinPtBAKdMfX8ygAcKWEdhEFED\nvmP/CDN/Inj4YSLaGTy/E8AjRa1vwVwA4CeJ6HvwU3Qvgx/JHxvcigPVspGDAA4y843B9x+D7+yr\naB8/BuC7zHyImYcAPgHgh1Fd29CiCOeuMnB7aQnyyVcCuIuZ3z311
PSQ8dcD+PSi11YEzPw2Zj6Z\nmffAt4XPM/PPArge/rB1oFrvx0MA7ieiM4OHXg7gW6imfdwH4Dwi2hp8bsL3opK2oUshm5iI6GL4\n0Vk4cPsPFr6IgiCiHwHwJQC3Y5Jjfjv8vPvfA9gN36hfw8yPF7LIgiCiCwH8JjP/BBGdBj+SPw7A\nNwH8HDP3i1zfoiCis+AXl5sAvgPgF+AHYpWzDyL6PQA/A19l9k0AvwQ/x15J29BBdqgKgiAsIbJD\nVRAEYQkR5y4IgrCEiHMXBEFYQsS5C4IgLCHi3AVBEJYQce5CpSCi/xF0GbyNiG4honOJ6M1EtLXo\ntQlCnogUUqgMRHQ+gHcDuJCZ+0S0Hb6W/KvwOw8+WugCBSFHJHIXqsROAI+GG14CZ34p/L4l1xPR\n9QBARK8kohuI6GYi+oegDxCI6HtE9C4i+nrw7/SifhFByEKcu1Al/hXAKUT0bSL6CyJ6CTP/Gfze\nJC9l5pcG0fzvAPgxZj4HwH4Ab5k6x2FmfhGA98LfZS0IVuJkHyIIywEzd4nohQB+FMBLAVwdMwns\nPPhDZL7itzNBE8ANU89fNfX/e+a7YkEwR5y7UCmY2QPwBQBfIKLbMWnGFUIAPsvMr006RcLXgmAV\nkpYRKgMRnUlEZ0w9dBaAewGsAVgNHvsagAvCfHrQkfBZUz/zM1P/T0f0gmAVErkLVaID4M+J6Fj4\nXQYPANgH4LUAPkNEDwZ59zcAuIqIWsHP/Q6Abwdft4joRviBUVJ0LwiFI1JIQVAkGCgikkmhFEha\nRhAEYQmRyF0QBGEJkchdEARhCRHnLgiCsISIcxcEQVhCxLkLgiAsIeLcBUEQlhBx7oIgCEvI/weA\nlB+FtkxWaQAAAABJRU5ErkJggg==\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x110eadc50>"
+ "<matplotlib.figure.Figure at 0x11afe9a20>"
]
},
"metadata": {},
@@ -1009,7 +1002,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "We instantiate a BatchRunner with a model class to run, and a dictionary mapping parameters to values for them to take. If any of these parameters are assigned more than one value, as a list or an iterator, the BatchRunner will know to run all the combinations of these values and the other ones. The BatchRunner also takes an argument for how many model instantiations to create and run at each combination of parameter values, and how many steps to run each instantiation for. Finally, like the DataCollector, it takes dictionaries of model- and agent-level reporters to collect. Unlike the DataCollector, it won't collect the data every step of the model, but only at the end of each run.\n",
+ "We instantiate a BatchRunner with a model class to run, and two dictionaries: one of the fixed parameters (mapping model arguments to values) and one of varying parameters (mapping each parameter name to a sequence of values for it to take). The BatchRunner also takes an argument for how many model instantiations to create and run at each combination of parameter values, and how many steps to run each instantiation for. Finally, like the DataCollector, it takes dictionaries of model- and agent-level reporters to collect. Unlike the DataCollector, it won't collect the data every step of the model, but only at the end of each run.\n",
"\n",
"In the following example, we hold the height and width fixed, and vary the number of agents. We tell the BatchRunner to run 5 instantiations of the model with each number of agents, and to run each for 100 steps. We have it collect the final Gini coefficient value.\n",
"\n",
@@ -1019,17 +1012,24 @@
{
"cell_type": "code",
"execution_count": 22,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "245it [00:37, 6.58it/s]\n"
+ ]
+ }
+ ],
"source": [
- "parameters = {\"width\": 10,\n",
- " \"height\": 10, \n",
- " \"N\": range(10, 500, 10)}\n",
+ "fixed_params = {\"width\": 10,\n",
+ " \"height\": 10}\n",
+ "variable_params = {\"N\": range(10, 500, 10)}\n",
"\n",
"batch_run = BatchRunner(MoneyModel, \n",
- " parameters, \n",
+ " variable_params,\n",
+ " fixed_params,\n",
" iterations=5, \n",
" max_steps=100,\n",
" model_reporters={\"Gini\": compute_gini})\n",
@@ -1046,14 +1046,12 @@
{
"cell_type": "code",
"execution_count": 23,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.collections.PathCollection at 0x114ab80f0>"
+ "<matplotlib.collections.PathCollection at 0x11b28bac8>"
]
},
"execution_count": 23,
@@ -1062,9 +1060,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYEAAAEACAYAAABVtcpZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnX2QHeV15p8z0szcq5FmBGEiO8jMkPAhYYxBLsmkjIsR\nSFj2JsZVmELKbtYxEwwmYxxSTiS8KSQvca0pyoVDvJQBT6Ks14yUXYKB2nUElGec0ibOTAAhwkgg\nB88sX2au14Yt2cMiw9k/uq+mp6f79tvf3befX9XU3Nu3P073vf0+73vOeU+LqoIQQkg16cjbAEII\nIflBESCEkApDESCEkApDESCEkApDESCEkApDESCEkApjJAIisk1EjorI8yKy0+PzXhF5WEQOicgz\nIvJ7ptsSQgjJDwmaJyAiHQCeB3A5gFcATAHYrqpHHevcAqBXVW8RkdMAPAdgDYB3grYlhBCSHyYj\ngU0AjqnqrKqeALAPwJWudRTAKvv1KgD/R1V/abgtIYSQnDARgdMBvOh4/5K9zMnXAZwnIq8AeBrA\n50NsSwghJCeSCgx/BMBTqvprAC4C8J9FZGVC+yaEEJISyw3WeRnAGY73a+1lTj4N4D8BgKr+q4j8\nCMA6w20BACLCIkaEEBISVZU425uMBKYAnCUiAyLSBWA7gIdd68wC2AIAIrIGwDkAXjDc9iSqWsq/\n3bt3524D7c/fDtpfzr8y258EgSMBVX1bREYAPApLNEZV9YiIXG99rPcC+DMAe0XksL3Zn6jqTwHA\na9tELCeEEBIbE3cQVPXvAJzrWnaP4/WrsOICRtsSQggpBpwxnABDQ0N5mxAL2p8vtD9fym5/XAIn\ni2WFiGhRbCGEkDIgItAMAsOEEELaFIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRU\nGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIoAIYRUGIpAjjQaDUxNTaHRaORt\nCiGkolAEcmJsbD8GBtZh69YbMDCwDmNj+/M2iRBSQfhQmRxoNBoYGFiH+flxABcAOIx6fTNmZ4+i\nv78/b/MIISWBD5UpKTMzM+jqGoQlAABwATo7BzAzM5OfUYSQSkIRyIHBwUG89dYMgMP2ksM4cWIW\ng4OD+RlFCKkkFIEc6O/vx+jo3ajXN6O3dwPq9c0YHb2briBCSOYwJpAjjUYDMzMzGBwcpAAQQkKT\nREyAIkAIISWFgWFCCCGxoAgQQkiFoQgQQkiFoQjkCMtGEELyxkgERGSbiBwVkedFZKfH518QkadE\n5EkReUZEfikiq+3PZkTkafvzyaRPoKywbAQhpAgEZgeJSAeA5wFcDuAVAFMAtqvqUZ/1fwvAH6rq\nFvv9CwA+oKo/CzhOZbKDWDaCEJIEWWUHbQJwTFVnVfUEgH0Armyx/g4AY473YnicysCyEYSQomDS\nOJ8O4EXH+5fsZUsQkTqAbQAecCxWAI+JyJSIXBfV0HaCZSMIIUUh6R76bwM4qKqvO5Z9SFU3APgY\ngD8QkUsSPmbpYNkIQkhRWG6wzssAznC8X2sv82I7FruCoKqv2v8bIvIgLPfSQa+N9+zZc/L10NAQ\nhoaGDMwrJzt2XIMtWy5j2QhCiDETExOYmJhIdJ8mgeFlAJ6DFRh+FcAkgB2qesS1Xh+AFwCsVdV5\ne9kKAB2qelxEegA8CuBLqvqox3EqExgmhJAkSCIwHDgSUNW3RWQEVgPeAWBUVY+IyPXWx3qvveon\nABxoCoDNGgAPiojax/q2lwAQQgjJBxaQI4SQksICcoQQQmJBESCEkApDESCEkApDESCEkApDESCE\nkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApD\nESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCp0mg0MDU1\nhUajkbcphBAPKAJtTN4N8NjYfgwMrMPWrTdgYGAdxsb252IHIcQfUdW8bQAAiIgWxZZ2YGxsP4aH\nb0RX1yDeemsGo6N3Y8eOazI7fqPRwMDAOszPjwO4AMBh1OubMTt7FP39/ZnZQUg7IyJQVYmzD44E\n2pBGo4Hh4RsxPz+ON954AvPz4xgevjH1EYFz5DEzM4OurkFYAgAAF6CzcwAzMzOp2kAICQdFoA3J\nowF2u36efPIQ3nprBsBhe43DOHFiFoODg6nZQAgJD0WgDRkcHMy0AfYaedx88y7ceedXUK9vRm/v\nBtTrmzE6ejddQYQUjOV5G0CSp7+/H6Ojd2N4eDM6Owdw4sSsbwPcdN0MDg5GbqCbI4/5+cUjjw0b\nLsTs7NHY+yeEpIdRYFhEtgH4GqyRw6iq3u76/AsA/i0ABdAJYD2A01T19aBtHftgYDhhghr4pILH\nDAITkg9JBIYDRUBEOgA8D+ByAK8AmAKwXVWP+qz/WwD+UFW3hNmWIpAtSTfcTUFxjjyyzEYipIok\nIQIm7qBNAI6p6qx90H0ArgTgKQIAdgAYi7gtyQg/F87MzEwkEdix4xps2XIZXT+ElAyTwPDpAF50\nvH/JXrYEEakD2AbggbDbVpE8J3O1Ch5Htau/vx8bN24MFIC8J7ERQhZIOjD82wAOqurrUTbes2fP\nyddDQ0MYGhpKxqoCkvdkLr/g8eOPfy9Vu/I+b0LKzMTEBCYmJhLdp0lM4GIAe1R1m/1+FwD1CvCK\nyN8C+BtV3Rdh28rEBIoUSHUGjwGkaler8wZAVxIhIclqxvAUgLNEZEBEugBsB/CwhzF9AC4F8FDY\nbatGmMlcabtOnC6ctCeZ+e3/nnvuY40hQvJCVQP/YPn5nwNwDMAue9n1AD7jWOdTAO432dbnGFoV\n5ubmtF4/VYGnFVAFntZ6/VSdm5tbtN799+/Tev1U7evboPX6qXr//fsKYVfYfU5OTurc3Jzv/mu1\n1Ykek5CqYLebRu2431+sjZP8q5IIqC408L29F3k28Gk0yEnYFWVfThFz7/+2276sfX0b7HO0/np7\nL9LJyckEz4pkiVP4SbpQBEpOq5tlcnIyt8Zxenpa9+7dq9PT05H30UrETEYHbEDyJWpDnvXotepQ\nBNqYvEcCUW5iZ8MRRsSSHH2Q+ET9DVDQs4ci0Obcf/8+rdVWa0/POVqrrS50TGDB1nO1Vlut3/jG\nvaH2lbULgS4Lb+L8BvIcvVaVJESAVUQzImqWj1V5o27/T5eo2UGNRgOf+tR1ePNNwc9/3oM33xR8\n7nN/FKqKqOlEsyTgE8/8iZMhlnX1WpIQcVUkqT+08UggzPC62UOdnp7OvCcdtRd44MABBVYs2g5Y\noQcOHEgkvpAkdFm0Ju71KbJrrx1Hf6A7qPiEuamcYtHd3av1+vtC+dSTCMb53cStbiBLBM5aZCvw\nG7pz5y2eduV5M9JlEUzchryIjW27B
qwpAiXAtNFZKhbjCtQDxSONnq279x50A83NzWlXV98iGzo7\nV3na1YwVZH0zRh1hVZUiNuRRaefRH0WgoERJgfQSi1ptULu7V7fskSXds3U3+KYB3uZ2PT0X+Ob/\nr1p1oXZ39+ae8TQyclNhXRYkedp59EcRKCAmE6S8Gh0/sZienm7ZIwvTywnq3Xntq7t7ta5a5e2W\ncu8vSPxa7csE096piQgHXdc0KGrvuqh2JQVHAhSBzDCdIOVHVF+syXYmPlGvHpNf793EreO2K2za\naFj7vdYryozkLHzSURrzvHzlWQtPkQPWcaAIFIwkhp1Rb45W25n2hPzWazbeURpzt11Rbsa49udd\nmyiLnmiUxryMExLj0I4jHopAwSjqsDOJ2btRZwN74XUzmpXQmFNgUoE5z+P52XXbbV/OtRdoan9U\nov7u8vCVF/UeKSsUgQJSxGGnV/ZOV1efr5sqSuwgzo1skn3U2blKgVMU2KDAKdrZuTJUplSevUBT\n+6MStTHPo0Fu5yBtHlAECkrRhp1WI7TSboQuOtkIxUnXTKqkhUlD1ErE/NJZyyLCSe0/bqwlq+u1\nYOu4PSoa50ggBhSBNiSNGbZe7oi46ZoLKaHvj9V4mPQM/da5+urtas2lOEeBuo6M3KSqxRPhVueY\nlK1xGvM4NkTZdmTk857fW5IU7TeQFhSBNsP05gj7A/fqfcVJ14zrRgg7j8I73bRX3ZPpgHphylM4\nCQq4JxUgTbvh8wvyFy0Y3a6zg72gCLQR09PTRo1a1B+4W2Cuvfa6UDdjUoHhqPMo3JPRrr/+s/a5\nqOPvbN27d6/R9ciaMOmyRezFRp1E6CaNpAL351UKPFME2oi9e/cGNmpRf+CmqZ+mufdRG4A4gVt3\nqeo77vhqaUYCTUyEtJnJlHcv1nu0Nu4YSfbqqlUXGTXmYUd+fkSd69LOgWeKQBthMhKI+gNvtV1Q\nDCKugJja0Qo/G6699jr7mp3d0n2WNAcPHtRbb71VDx48GHkfRZ3ToOo94a5e/3UFTlUru+lU7e5+\nj3Z3B9sadeTnJu5cEY4EKAKlYGTkppaNWlojgag9q+ixifAuBHdF1Xr9fJ2cnEykQQ7D1q0fXfQd\nXXHFRyPvy/95y+nMJ2jS6nvz+o4sYVraQbnjjq+2zBBLImW3ud6BAweMf4dFzBBLC4pAGxLUM0+q\ntISpSydMw21yY5va79yX3yjpjju+mqnr5ODBg552xB0ROF0lac4nUA12qXiJfk/POdrd/d4lInzb\nbV/WWu0UrdfP11rtFKN9hRE1p6212uolabatOjJFjKukAUWgokSZ4OVeJ+lnAEd5cI6f/d7uiDNt\nd8RFodwRSXLrrbfaIwB1/J2lt956a+R9ukUgznyCJIKmfiMBr2Wdnb0tbY3jmvHatrNzZaSOTDtD\nESgoWfdCskjVM3MhjGuUCUBu+62JbV6+8gcV2KvAgy0Dk2ld/6RHAkkWu0syaOol+u5lu3Z9Ub0e\nJHTgwIFF+wpycfrhZ+uBAwcSK1/SDlAECkjWOcpJZFuYuJZaNayWz35x4LBWG4w858B6VOXcohv7\n6quvUWeK6/Dw74eKcyQhDHNzcypSV+fMa5F6pH2a9rjjZl2ZrOdVVjtotNbqkaJLj2fWOQibRZRE\nzKHsUAQKRh6ZCWnnXasGC5vpHAdT+4HfUODbHiOBpQ2+MzDp5x7wEoYoLrUkC8ElWewujmtvZOTz\nkTotVvxiaSkS57WLYldQFpFJEJiTxSgCudGOVRlN9t8qeyfK/ru6+rRWW+2RNePdYDZLV5g+zaz5\n6EuvBqdVw5FkkLzVvsKWDonq2ov7qM2g+lFJpHW2ihV5jfKqNjqgCBSMPEYCqummxJkIW9zz9vIb\nB93YfsFK97Kl5THm1O3GWBhpjGuQ28I0SN7d3au12hna3d3r65JKshcb5TeQRKclqGE1sSvJ31jR\nJ+ElDUWggOSVo5xWL8c0YyXqeZv6jf1z6r1vdv8Mkm+rO6DZ03OBdnf/mprGNIKC5B0ddXXGLzo6\nakaxirhiGmUEkWZ1U+dx4mYtmQpWkrGWMpCZCADYBuAogOcB7PRZZwjAUwD+BcC4Y/kMgKftzyZb\nHCO9K5Ux7TTsNPH9OtcNMwGoVYaHMwvEa7tW4tGqx+2Vb16rnaJAbdGyqCUo9u/fr0vjIzXPHHfT\nXqzXtXATNUNsQbDOPilYebhOgh5m1Mp1FTTCKsojRtMgExEA0AHghwAGAHQCOARgnWudPgDPAjjd\nfn+a47MXAJxicJwULxVRjSZOrYKhUfZnUofIy2fvRZiSxF6+5WYxOmv08L5F59iMaYQ9x7vuukuX\nziVYq7Xa+ZF6sV1dfdrd3XuyZlLcWIWThQyfBSEFVujOnbek7joxCcy7fysjIzcZu8+co6K83LRZ\nkJUIXAzgu473u9yjAQCfBfAffbb/EYBfMThOSpeJqEZPXfW7gaKUQQ7al19PPWww0eRaOIvRfeMb\n93rO1I1yjt6ZUjV7tBFsq7sXa/XUW88gjurbt0TAnes/mPokvDhB+EceeeRkmZAwv812LSWRlQhc\nBeBex/t/B+Au1zp3Avg6gHEAUwB+1/HZCwCetJdf1+I4aV6rtifITx0nYyJqyQm3XSZ1iFrViHES\ntSie37WwRMA5GumNXMzNK9AdphFq2n/fffdpUC5+q3Myccu5BXf58p5UJ+HFCfB2da1XoFubI7+r\nr75mySh15crzfUWsndy0TYokAn8B4B8A1AD8ih07OMv+7N32/37blXSJz3F09+7dJ//Gx8fTvHZt\nRZR6MM0gqrNHbDpZLOzM0zAlqE2DlX6NyfBws7Kot4vIuzbOBdrTc27gsjB+ZC8hCiO4fX0b7NRW\n71m57n1FnZnbHBXVameFnmthSpTfjtf3a53f+KL3y5f3qHOktGxZ3RaxdIvw5cX4+PiidjJLd9Df\nOd57uYN2AtjteP9NAFd57Gs3gD/yOU5a162tMelZ+a3jvoFMi5XFOWZQCeq5uTldtmyFOgPRy5at\nMHKdmDxjwM8ur15/mJGASQMfPktmfMn5dHX1OSbJnetquMc1KEjudw2djwl1x0zi1OiJ0hFwb9vb\ne5HdMXi3SxDP1mXLehbtq7NzVepF+IpEViKwzBEY7rJ78+td66wD8Ji97goAzwA4z3690l6nB8D/\nAnCFz3HSvl5tSdR6MFbtl2BXgx9B7g0T14/Xjbngp17oyXnVpGni3JfJg3n8bPdb1moylHt/rWYk\nm0x08grC12qWj97ZILsbuYXe79KRnruBd187P6F2ikyr7Joobsgwz6Jo7t+vblNPz+KA+8qV5wcW\ntmsnMhEB6zjYBuA5AMcA7LKXXQ/gM451vmBnCB0G8Dl72Zm2aDxlC8OuFsdI92q1KWH8wc4b1jso\n6N/Y+h07ShyiFSY1afxoVb7CbWuY7BS/RtTvPN3ZTaYuFr8AtbO2j9/16epa3PCZVPn0EmqvGd
Z+\no6KguQ9hngFggtvl5fWI1DBPPGsHMhOBLP4oAtGJkvlg6nvPwy7TeQletArKplFldWnvfXpJI710\n1rJ6BjBNGm4/8d6164seI73WIu91jl62mk3C8xYGr4qwcVxq7lhLnKSFdoAiUFDyyEKIcky37zer\nfHATu0zcMH4kkSMeJoC5uPe+aknj69W79uqx9vScoz097295TNPRjjVpbemIYf/+/UuutWkjGhTg\n9RI2dx2oMC41U/xGcO2WDuoFRaCA5FXBMKrwFDVtLim7os7CNRWPhRHVuD0SeHBJI+3lB/dqbP3K\nGyx1B71LnQ/YAdYsyRiy1uteNKICuj0zi6I0ot6jCG9XjPm1bl7DcM+j8NtnEX/XSUMRKBhRe51x\nqVLpXBOcDYDXd9LsnQZdL9PiZ+5nKSxf3q/d3Ut7vyaN7dJSzzct+m4XAvoLDabXLF8rvrBSgT61\nguV9oSbAhUlndQqbqYvRParwex5FEllXYdcrExSBghE1IyYOeQlP2kS9Xl6C6G6swvipg4qy+bln\nHnnkEaNibq3mE/jVy1lI7bV6+MuX9wRk+bR+3kJSPe65ObP0Xvd35Jfaa/IMaZPgfZj1ygZFoGAE\npcSl0VPPQ3jSJukSF05/tumMZFM7rF7s4kBqV9d67e7uNR5pRJ/kd06oFM5WdaC8rmXY345Jeq+f\nG8ldW6lWe68dR1l4pKhbsEyTG7JIgsgLikAByTpbIQ/hSZM4IxuTYK6JUPi5kfxiAu713DNbTbfz\nauSSttUKYvcpcK42XURBPXXT345Jeq9pQLm7e7UuX96vztnfy5f3L/ouTdOck0iHLioUgYISlEWR\ndN5yO6XJxblepo1hK198lIe+u9NSOzvPCNwu6iS/OPELy13jLrOwwlh4TK5/UHpvUKelaf/u3V9S\nPzdb0JwJbxGIPjGyyFAESkBWPvushSct4l6voLr0zf0EpZGaPohkaWaLd3ZQlJGAc92ogc+lGUPh\ne+qtfjtewe6g9F6T78h79ve7taurzyP43XpOiYk4lRWKQEkwbZiSIq4PNO9YQtw8b78sHHdd+qBe\n/0LZBf95FF6NZrPUQxK99zi4z3vXrlsC3SJhxMmklr8fQYK1NOA+7imu7uC33zW0Rms1Bd6jQM24\nwF7RoQiUiKCGKckGIE7PJ2m7ogpKUkJk4rP36/W7a+iEqXvvzO1vRdhHQsY5byszanHJ7KiPCk06\nFuX1u1vsZuu2S0kvHaEE/VaWjtbiz0MoChSBkpK2iyhMFkgcu4JuviLMX/DqqVuNyuSSXn/UdMqo\nPXrT6xNFKLyylur1841GN6rB361pgNdEEFv97prnfvDgwVQTBsoKRaCkpP2jjNrzCWNXUAOWVSwk\nCNPsnWaj38wjD/tc2rAjF9PrE+YRmk7CFNOLgl+qp3vGcL1+fmC6bNJB8qZ9YbOnyghFoKRk8aOM\n0njESYt0r1ek3pffLNygbKqknyfgxOT6tGrITfZfr5+pzvISzVm4SWGSlRYuXTa402JynU0mDJYl\nZToIikCJSfNHGUdkTEslxMnHzwN342GSTeWunJlk8TOT62P6fITW+w9uWOPgF+vq7b1Iu7tX20IU\n3BGIOuLxssfvuuad8JAGFIGSk9aPMmrRNFO7TBv4svS+4jQcaQpunJGAyf7DYhqbaF6z6enpWI8K\njXJfFGkEmgUUgYISJ+MjLX+tadE0U0wbmDg57lkStcEM0+gEpUV6EfXZwXGO6W1HNPeiSZZakg13\n0UagaUMRKCBxhrVJZtPEKZpmSlINdxGyiFSjnU/YUVGUc0wyjTSKHVFHJK2y1NIM3JZlBJoEFIGC\nEWf4noYPt3mjhSmaFrSvJHpU7Za5EdToFOUco9oRNTYRZi5B0g133iPLrKAIFIxWN4tJ3rVfTfW4\nxG2E0hihRKnRU2Rafb9F8VNHtSNO5yZMXauqNNxJQhEoGH43i0ld9LhBwCCi9rSS7MV67cu0Rk+Z\nKftIQDVebMIkE6tsol8UKAIFxH2zXHvtdUY3nt8Mzzg3R6u0SFOSvGnjpmLmQdJxj7zPMY4dScQm\niiKI7QJFoKA4bxbTRjSt4FhcF06Sedd55nBH2X9R6iglTd52FEUQ2wGKQAkI07gndXNkkW0RtYHM\nowFIazJXnuTdkMel7PYXBYpASYha8yQqafhdk8zoybIBiGprkX3XRUmpJflDESgRZWj4TClyA+km\nqq1FHQkU1S6SD0mIQAdIJvT392Pjxo3o7+/P5Fijo3ejXt+M3t4NqNc3Y3T07sSOPTg4iLfemgFw\n2F5yGCdOzGJwcDCR/SdJVFtbXcNGo4GpqSk0Go2UrV/KzMwMuroGAVxgL7kAnZ0DmJmZydwW0ibE\nVZGk/tDmI4E8SHP0UabgXhxb3dcwb1cMRwLECRIYCYi1n/wRES2KLcSMRqOBmZkZDA4OZjLCiUMS\ntjYaDQwMrMP8/Disnvhh1OubMTt7NNPzHxvbj+HhG9HZOYATJ2YxOno3duy4JrPjk+IgIlBVibUP\nk4ZXRLYB+BqADgCjqnq7xzpDAO4E0AmgoaqbTbe116MIkEIzNTWFrVtvwBtvPHFyWW/vBjz++D3Y\nuHFjpraUSYBJemQiAiLSAeB5AJcDeAXAFIDtqnrUsU4fgH8AcIWqviwip6nqT0y2deyDIkAKTVFG\nAoQ0SUIETALDmwAcU9VZVT0BYB+AK13r/A6AB1T1ZQBQ1Z+E2JaQUpB2wJ2QPFhusM7pAF50vH8J\nVuPu5BwAnSIyDmAlgLtU9VuG25IWcNhfLHbsuAZbtlzG74S0DSYiYLqfDQAuA9AD4B9F5B/D7mTP\nnj0nXw8NDWFoaCgh88pJMwDY1WWlOWYVAKTwtKa/v5/XheTCxMQEJiYmEt2nSUzgYgB7VHWb/X4X\nrLSk2x3r7ARQU9Uv2e+/CeC7AF4O2taxD8YEHOTlf85LeAgh4ckqJjAF4CwRGRCRLgDbATzsWuch\nAJeIyDIRWQHggwCOGG5LPGg1KSityUqNRgPDwzdifn4cb7zxBObnxzE8fGMuk6IIIdkQKAKq+jaA\nEQCPAngWwD5VPSIi14vIZ+x1jgI4AGta5g8A3Kuq037bpnMq7YXfTNcnnzyEgYF12Lr1BgwMrMPY\n2P7EjsnZqIRUD04WKzDuSUF33vkV3HzzrtRcREyBJKRcZOUOIjmxY8c1mJ09iscfvwezs0exYcOF\nqfbUmQJJSPXgSKBELPTURwG8AaAP9fpw4j11ZgcRUg6SGAkklSJKMqC/vx+XXPJBPPbY7wBYC+Al\nfPjDQ4k31EyBJKQ6cCRQIo4cOYLzzvsArNi75bMHLsb09BNYv359vsYRQjKHMYGKMTk5CeA9cMYE\ngLX2ckIICQ9FoERs2rQJVhWOhbRR4CV7OSGEhIciUCLWr1+PkZHrAFwMq1zTxRgZuY6uIEJIZBgT\nyAivjJuoWThHjhzB5OQkNm3aRAEgpMJk9lCZLGhnE
fCqxwOANXoIIbGgCJQAv1m4qu/gzTe/D87M\nJYREhdlBJcCrHk9Hx1osW7YGJjN/0yoWRwghAEUgdbwKwb3zzkt4++3X4C4ONzg4uGjbsbH9qRWL\nI4QQgO6gTHAXgnPGBJzLnDEBFnMjhATBmECJCJsdNDU1hUsvHcb8/OGTy+r19+H73/9LbNy4MVPb\nCSHFhLWDSoRXPZ5WNXpWrlyJ+fkfwnIZWSOB+fl/xcqVK1O3lRBSHSgCBeX48eOo19+F+fnNAAYA\nzKJWW4Pjx4/nbRohpI2gCBQUK0j8BoAHAPQA+DlErloSPCaEkDgwO6igLDzg5Sr09l6Pev0qPuCF\nEJI4DAwXHNPSEnwQDCHVg9lBBIB3WQqWoCCk/aEIEM4nIKTCsGwE8SxLkeTD5wkh7Q1FoOR4laXw\nKkFBCCFeUARKzkIW0Wb09m5Avb6ZWUSEEGMYEyghST6ghhBSXhgYriDMBCKENKEIVAxmAhFCnDA7\nqGIwE4gQkjRGIiAi20TkqIg8LyI7PT6/VEReF5En7b8/dXw2IyJPi8hTIjKZpPFVg5lAhJCkCSwg\nJyIdAL4O4HIArwCYEpGHVPWoa9W/V9WPe+ziHQBDqvqz2NZWnGYm0PDw5kUPo6EriBASFZMqopsA\nHFPVWQCO95EMAAAHn0lEQVQQkX0ArgTgFgE/v5SAbqfE2LHjGmzZchkzgQghiWAiAqcDeNHx/iVY\nwuDmN0XkEICXAfyxqk7byxXAYyLyNoB7VfW+OAaT1g+jIYSQMCT1PIEnAJyhqr8QkY8C+A6Ac+zP\nPqSqr4pIPywxOKKqBxM6LiGEkBiYiMDLAM5wvF9rLzuJqh53vP6uiNwtIqeq6k9V9VV7eUNEHoQ1\nivAUgT179px8PTQ0hKGhIcPTIISQ9mdiYgITExOJ7jNwnoCILAPwHKzA8KsAJgHsUNUjjnXWqOpr\n9utNAP5GVQdFZAWADlU9LiI9AB4F8CVVfdTjOJwnQAghIcjkQfOq+raIjMBqwDsAjKrqERG53vpY\n7wXwSRH5LIATAOYBNKewrgHwoIiofaxvewkAIYSQfOCMYUIIKSmcMVwiGo0Gpqam0Gg08jaFEEJO\nQhHIgLGx/RgYWIetW2/AwMA6jI3tz9skQggBQHdQ6rDoGyEkLegOKgEs+kYIKTIUgZRh0TdCSJGh\nCKQMH/9ICCkyjAlkBB//SAhJGj5ZjBBCKgwDw4QQQmJBESCEkApDESCEkApDESCEkApDESCEkApD\nESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApDESCE\nkApDESCEkApDESCEkApDESCEkApDESCEkApDESCEkApjJAIisk1EjorI8yKy0+PzS0XkdRF50v77\nU9NtCSGE5EegCIhIB4CvA/gIgPcC2CEi6zxW/XtV3WD//VnIbUvNxMRE3ibEgvbnC+3Pl7LbHxeT\nkcAmAMdUdVZVTwDYB+BKj/Ukxralpuw/ItqfL7Q/X8puf1xMROB0AC863r9kL3PzmyJySET+h4ic\nF3JbQgghObA8of08AeAMVf2FiHwUwHcAnJPQvgkhhKSEqGrrFUQuBrBHVbfZ73cBUFW9vcU2PwLw\nAVhCYLStiLQ2hBBCyBJU1csVb4zJSGAKwFkiMgDgVQDbAexwriAia1T1Nfv1Jlji8lMRCdy2SdwT\nIYQQEp5AEVDVt0VkBMCjsGIIo6p6RESutz7WewF8UkQ+C+AEgHkA17TaNqVzIYQQEpJAdxAhhJD2\nJfMZwyLySRH5FxF5W0Q2uD67RUSOicgREbnCsXyDiBy2J5x9LWubW1GGyXAiMioir4nIYceyU0Tk\nURF5TkQOiEif4zPP7yEPRGStiHxPRJ4VkWdE5CZ7eVns7xaRfxKRp2z7d9vLS2F/ExHpsCeCPmy/\nL439IjIjIk/b38GkvaxM9veJyH+z7XlWRD6YqP2qmukfgHMBnA3gewA2OJavB/AULBfVIIAfYmGk\n8k8ANtqv/yeAj2Rtt8+5dNh2DgDoBHAIwLq87fKw8xIAFwI47Fh2O4A/sV/vBPAV+/V5ft9DTra/\nC8CF9uuVAJ4DsK4s9ts2rbD/LwPwA1jzZ0pjv23XzQD+K4CHy/T7sW16AcAprmVlsn8vgE/br5cD\n6EvS/sxHAqr6nKoew9LJZVcC2Keqv1TVGQDHAGwSkXcBWKWqU/Z6/wXAJzIzuDWlmAynqgcB/My1\n+EoAf22//mssXNOPw+N7yMJOL1T1x6p6yH59HMARAGtREvsBQFV/Yb/shnVzKkpkv4isBfAxAN90\nLC6N/bDaGndbVwr7RaQXwIdV9a8AwLbrDSRof5EKyLknlr1sLzsd1iSzJkWacFbmyXC/qnZGl6r+\nGMCv2sv9vofcEZFBWCOaHwBYUxb7bVfKUwB+DOAxu0NTGvsB3Angj2GJV5My2a8AHhORKRH5fXtZ\nWew/E8BPROSvbHfcvSKyAgnan9RksUWIyGMA1jgXwfoi/oOqPpLGMUlsCp0hICIrAfx3AJ9X1eMe\n80oKa7+qvgPgIrtX96CIvBdL7S2k/SLybwC8pqqHRGSoxaqFtN/mQ6r6qoj0A3hURJ5DSa4/rDZ6\nA4A/UNV/FpE7AexCgvanIgKqujXCZi8DeI/j/Vp7md/yIvAygDMc74tkWxCvNed32C63OXt54a63\niCyHJQDfUtWH7MWlsb+Jqv5fEZkAsA3lsf9DAD4uIh8DUAewSkS+BeDHJbEfqvqq/b8hIt+B5R4p\ny/V/CcCLqvrP9vsHYIlAYvbn7Q5yxgUeBrBdRLpE5EwAZwGYtIc6b4jIJhERAP8ewEMe+8qDk5Ph\nRKQL1mS4h3O2yQ/B0uv9e/brT2Hhmnp+D1kZ6cNfAphW1T93LCuF/SJyWjNzQ0TqALbCimuUwn5V\n/aKqnqGqvw7r9/09Vf1dAI+gBPaLyAp7FAkR6QFwBYBnUJ7r/xqAF0WkWYbncgDPIkn7c4h0fwKW\nz2oe1izi7zo+uwVWNPsIgCscyz8A64s7BuDPs7Y54Hy2wcpYOQZgV972+Nh4P4BXAPw/AP8bwKcB\nnALgcdv2RwGsDvoecrL9QwDehpV59RSAJ+1rfmpJ7H+fbfMhAIdhuURRFvtd53IpFrKDSmE/LJ96\n87fzTPMeLYv9tj3vh9XhPATgb2FlByVmPyeLEUJIhcnbHUQIISRHKAKEEFJhKAKEEFJhKAKEEFJh\nKAKEEFJhKAKEEFJhKAKEEFJhKAKEEFJh/j++BP6u5NB27AAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAD8CAYAAACb4nSYAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAE+dJREFUeJzt3W+sHFd5x/HvU6dOSomKg50oimPstAYlojRpVzE0fRFQ\nHQxUpFIRsqnUoFL8pmnpH1ElckVoaCS3LxraKqqSthF9UTAqpcGlUVOXEAlRML4ugcRODY4Jyq2R\nY0hCUxVh7Dx9seOwXt8/u3t3d2b2fD/S6u6cPXvvOXtnf3Pm7MxsZCaSpDL8SN0NkCRNj6EvSQUx\n9CWpIIa+JBXE0Jekghj6klQQQ1+SCjJQ6EfEtog4EhFHI+K2BR6/OyIerW5fi4jnex470/PY3nE2\nXpI0nFju5KyIWAV8DdgKzAMHgB2ZeXiR+r8FXJeZv14t/29mvnysrZYkjeSCAepcDxzNzGMAEbEH\nuBlYMPSBHcAdozZo7dq1uXHjxlGfLklFOnjw4Lczc91y9QYJ/SuAp3uW54EtC1WMiFcBm4CHe4ov\niog54DSwOzMfWOqPbdy4kbm5uQGaJUk6KyK+OUi9QUI/FihbbE5oO/CJzDzTU7YhM49HxFXAwxHx\nWGY+2dfYncBOgA0bNgzQJEnSKAb5IHceuLJneT1wfJG624GP9RZk5vHq5zHgEeC6/idl5n2Z2cnM\nzrp1y+6dSJJGNEjoHwA2R8SmiFhNN9jPOwonIl4DrAG+0FO2JiIurO6vBW5g8c8CJEkTtuz0Tmae\njohbgYeAVcD9mXkoIu4E5jLz7AZgB7Anzz0c6Grg3oh4ke4GZvdiR/1IkiZv2UM2p63T6aQf5ErS\ncCLiYGZ2lqvnGbmSVBBDX5IKYuhLUkEMfUkqiKEvSQUx9CWpIIa+JBXE0Jekghj6klQQQ1+SCmLo\nS1JBDH1JKoihL0kFGeSbszSDtty1jxMvnHpp+bKLV7N/19YaWyRpGhzpF6g/8AFOvHCKLXftq6lF\nkqbF0C9Qf+AvVy5pdhj6klQQQ1+SCmLoF+iyi1cPVS5pdhj6Bdq/a+t5Ae/RO1IZPGSzUAa8VCZH\n+pJUEENfkgpi6EtSQQx9SSqIoS9JBTH0Jakghr4kFcTQl6SCGPqSVBBDX5IKYuhLUkEMfUkqiKEv\nSQUx9CWpIIa+JBXE0Jekggz0JSoRsQ34c2AV8DeZubvv8buBN1aLLwMuzcxXVI/dAvxh9dgfZ+bf\njaPhbbflrn2ceOHUS8t+c5WkaVh2pB8Rq4B7gLcA1wA7IuKa3jqZ+buZeW1mXgv8JfDJ6rmXAHcA\nW4DrgTsiYs14u9A+/YEPcOKFU2y5a19NLZJUikGmd64Hjmbmscw8BewBbl6i/g7gY9X9NwP7MvPZ\nzHwO2AdsW0mDZ0F/4C9XLknjMsj0zhXA0z3L83RH7ueJiFcBm4CHl3juFcM3U2ofp/DURIOM9GOB\nslyk7nbgE5l5ZpjnRsTOiJiLiLmTJ08O0CSp2ZzCU1MNEvrzwJU9y+uB44vU3c4Pp3YGfm5m3peZ\nnczsrFu3boAmtdtlF68eqlzt4xSemmqQ0D8AbI6ITRGxmm6w7+2vFBGvAdYAX+gpfgi4KSLWVB/g\n3lSVFW3/rq3nBby7/pKmYdk5/cw8HRG30g3rVcD9mXkoIu4E5jLz7AZgB7AnM7Pnuc9GxIfobjgA\n7szMZ8fbhXYy4CXVYaDj9DPzQeDBvrIP9C1/cJHn3g/cP2L7pFa67OLVC07lOIWnunlGrjQBTuGp\nqQYa6UsangGvJnKkL0kFcaSvifHkJKl5HOlrIjw5SWomQ18T4clJUjMZ+pJUEENfkgpi6GsivL6Q\n1EyGvibCk5OkZvKQTU2MAS81jyN9SSqII33NLE8Ok85n6GsmLXVy2CjB7wZEs8LpHc2kcZ4c5tnF\nmiWO9KVlLLUBcQ9AbWPot4gB0zzjnEKSpsHpnZZwimE4dZ4c5vWF1GSO9Ftili5gNo09lv27to7t\n7yz21YdSGxUb+k6V1GPcR9UsZVy/b7ENiBsCtVGRoT/N4NG52rrHstB6sdB6BF5fSM1WZOi3MXgW\nG1lOK2DcMzrfOKeQpGkpMvTbqM6Acc9ocaX3X+1j6LdIXQEzzj2juvdYpNIVecim13qvj5dclupV\n5EjfudjFLfS6jJuv8+zz/dVcRYY+jDd4ZmUFX2zufjHuGWkhfgbUbMWG/rjM0gq+XMDPwoZNk9fG\no+PGqemDQEN/hUpZwZu00kpN1YZBoKHfME0fJUha3KiDwGm+74s8eqep6r6omkc1aRxcj4Yz7fe9\nI/0VGudx53VPFXlU03RM4zWu8/+41Hrk+nW+ab/vDf0hlLAit7XdbTGNOd8mfFXkoNcqatp890q1\n4eRDQ39AJaywmrxRv4VrmOCd1ldFDrve170nOw1tGAQa+gOaxgrbhlGCJmeped26BhwlBPW4LfU/\nWWiDMO33vR/kNoiXKFC/Ey+cMnhnxFInP07zfe9Iv2EM+OE0fVe63zS+fKWpe4xNbde0LLXxfmr3\n26bWjoFG+hGxLSKORMTRiLhtkTrvjIjDEXEoIj7aU34mIh6tbnvH1fBp8zC05qn7ENdRLLY3N42/\nMepXRQ5TPq12aXTLjvQjYhVwD7AVmAcORMTezDzcU2czcDtwQ2Y+FxGX9vyK72XmtWNu99S14QOa\n0rR12mOUb+EadoQ86a+KHPX3e82r+g0yvXM9cDQzjwFExB7gZuBwT533Avdk5nMAmfnMuBvaBK5Q\nmpTlwrXu4+6bpo1H0zVlemuQ0L8CeLpneR7Y0lfn1QAR8XlgFfDBzPzX6rGLImIOOA3szswHVtZk\naWWaOkJcqg1NaF+TtHEvrymzBYOEfixQlgv8ns3AjcB64HMR8drMfB7YkJnHI+Iq4OGIeCwznzzn\nD0TsBHYCbNiwYcguqG51rcijjJzaOELU7GjCOjZI6M8DV/YsrweOL1Dni5n5A+AbEXGE7kbgQGYe\nB8jMYxHxCHAdcE7oZ+Z9wH0AnU6nf4OiEdVxuj9M7+zPUUZOy40QmzAS0+T4/x0s9A8AmyNiE/Df\nwHbgXX11HgB2AB+JiLV0p3uORcQa4P8y8/tV+Q3An46t9VrUtEa00zr7s/93nn2zTvKDwd6/X1ow\nNJ17eaNbNvQz83RE3Ao8RHe+/v7MPBQRdwJzmbm3euymiDgMnAHen5nfiYifB+6NiBfpHh66u/eo\nH01OG+c8h2lz0zdgbdSmUfAk9vJKMdDJWZn5IPBgX9kHeu4n8HvVrbfOfwA/vfJmSucb5c261Aix\ntDd/rzaOgpvarqbzjFytSFMOQxvUUiPEjbf9S40tq1cbvvyjadrad0N/wtp0ZMsoxnkY2rRG24u1\nrW0bsLq1be9gnP/ftvW9lxdcm6A6LxMwzVPe9+/aylO73/bSbSVnay7U5mldAsPLBAynbXPk4/z/\ntq3vvRzpT1DdK0Ybw2qxNk9rj6mNr
9k4lLKXU+r/t5ehr1bwzTpZTThbtO6/XwpDXyrMYuE6bMCW\nPEfe5j0jQ3+C2rxiaDaNM1zHuXcw7qnQYds1bP0m7BmNytCfoDavGJpN4w7XJq7Lw27YRt0QNrHv\ngzD0J6ytK4bUVsNu2Oo+4GLaPGRTUu38ZrrpcaQvFaSpnzM1eSq0qe0alaEvFaTJ4TquNgy7YVuq\nftuOKhpEdK+V1hydTifn5ubqbobUCk0N8LqN6+idpa7H9NTut42nsWMSEQczs7NcPUf6UkvN4ih0\nXEY5/LQUhv4McLRXptKOOtF4GPot52hPs67OQU1TP/heCQ/ZbDlHe5pldV6pFmbzyquO9KWWmsVR\naL8mDGraHPALcaQvtdQsjkI1eY70W66E0Z4WZ8BrWI70W87RnmaZl2cYP0/OktRoHpI8GE/OkjQT\nDPjxcnpHkgpi6EtSQQx9SSqIoS9JBTH0Jakghr4kFWTmD9n0GF9J+qGZHunXfYU+SWqamR7pj3qF\nPvcOJM2qmR7pj8K9A0mzzNDv04Trd0vSpMx06HuFPkk610yHvpcdlqRzzfQHuTD8Ffr8UhJJs2yg\nkX5EbIuIIxFxNCJuW6TOOyPicEQcioiP9pTfEhFfr263jKvhk+LegaRZtuxIPyJWAfcAW4F54EBE\n7M3Mwz11NgO3Azdk5nMRcWlVfglwB9ABEjhYPfe58XdlfAx4SbNqkJH+9cDRzDyWmaeAPcDNfXXe\nC9xzNswz85mq/M3Avsx8tnpsH7BtPE2XJA1rkNC/Ani6Z3m+Kuv1auDVEfH5iPhiRGwb4rmSpCkZ\n5IPcWKCs/4t1LwA2AzcC64HPRcRrB3wuEbET2AmwYcOGAZokSRrFICP9eeDKnuX1wPEF6nwqM3+Q\nmd8AjtDdCAzyXDLzvszsZGZn3bp1w7RfkjSEQUL/ALA5IjZFxGpgO7C3r84DwBsBImIt3emeY8BD\nwE0RsSYi1gA3VWWSpBosO72Tmacj4la6Yb0KuD8zD0XEncBcZu7lh+F+GDgDvD8zvwMQER+iu+EA\nuDMzn51ERyRJy4vM86bYa9XpdHJubq7uZkhSq0TEwczsLFdvpi/DIEk6l6EvSQUx9CWpIIa+JBXE\n0Jekghj6klQQQ1+SCmLoS1JBDH1JKoihL0kFMfQlqSCGviQVxNCXpIIY+pJUEENfkgpi6EtSQQx9\nSSqIoS9JBTH0Jakghr4kFcTQl6SCGPqSVBBDX5IKYuhLUkEMfUkqiKEvSQUx9CWpIIa+JBXE0Jek\nghj6klQQQ1+SCmLoS1JBDH1JKoihL0kFMfQlqSCGviQVxNCXpIIMFPoRsS0ijkTE0Yi4bYHH3x0R\nJyPi0er2Gz2Pnekp3zvOxkuShnPBchUiYhVwD7AVmAcORMTezDzcV/XjmXnrAr/ie5l57cqbKkla\nqUFG+tcDRzPzWGaeAvYAN0+2WZKkSRgk9K8Anu5Znq/K+v1KRHw1Ij4REVf2lF8UEXMR8cWI+OWV\nNFaStDKDhH4sUJZ9y/8MbMzM1wH/Dvxdz2MbMrMDvAv4cET85Hl/IGJntWGYO3ny5IBNlyQNa5DQ\nnwd6R+7rgeO9FTLzO5n5/Wrxr4Gf63nsePXzGPAIcF3/H8jM+zKzk5mddevWDdUBSdLgBgn9A8Dm\niNgUEauB7cA5R+FExOU9i28HnqjK10TEhdX9tcANQP8HwJKkKVn26J3MPB0RtwIPAauA+zPzUETc\nCcxl5l7gtyPi7cBp4Fng3dXTrwbujYgX6W5gdi9w1I8kaUois396vl6dTifn5ubqboYktUpEHKw+\nP12SZ+RKUkGWnd5piy137ePEC6deWr7s4tXs37W1xhZJUvPMxEi/P/ABTrxwii137aupRZLUTDMR\n+v2Bv1y5JJVqJkJfkjQYQ1+SCjIToX/ZxauHKpekUs1E6O/ftfW8gPfoHUk638wcsmnAS9LyZmKk\nL0kajKEvSQUx9CWpIIa+JBXE0Jekghj6klQQQ1+SCmLoS1JBDH1JKoihL0kFMfQlqSCGviQVxNCX\npIIY+pJUEENfkgpi6EtSQQx9SSqIoS9JBYnMrLsN54iIk8A3l6m2Fvj2FJrTVCX3v+S+Q9n9t+9L\ne1VmrlvuFzUu9AcREXOZ2am7HXUpuf8l9x3K7r99H0/fnd6RpIIY+pJUkLaG/n11N6BmJfe/5L5D\n2f2372PQyjl9SdJo2jrSlySNoHWhHxHbIuJIRByNiNvqbs+4RcT9EfFMRDzeU3ZJROyLiK9XP9dU\n5RERf1G9Fl+NiJ+tr+UrFxFXRsRnI+KJiDgUEe+rykvp/0UR8aWI+ErV/z+qyjdFxP6q/x+PiNVV\n+YXV8tHq8Y11tn8cImJVRHw5Ij5dLZfU96ci4rGIeDQi5qqysa/7rQr9iFgF3AO8BbgG2BER19Tb\nqrH7CLCtr+w24DOZuRn4TLUM3ddhc3XbCfzVlNo4KaeB38/Mq4HXA79Z/X9L6f/3gTdl5s8A1wLb\nIuL1wJ8Ad1f9fw54T1X/PcBzmflTwN1VvbZ7H/BEz3JJfQd4Y2Ze23N45vjX/cxszQ14A/BQz/Lt\nwO11t2sC/dwIPN6zfAS4vLp/OXCkun8vsGOherNwAz4FbC2x/8DLgP8EttA9KeeCqvyl9wDwEPCG\n6v4FVb2ou+0r6PP6KtjeBHwaiFL6XvXjKWBtX9nY1/1WjfSBK4Cne5bnq7JZd1lmfgug+nlpVT6z\nr0e1u34dsJ+C+l9NbzwKPAPsA54Ens/M01WV3j6+1P/q8e8Cr5xui8fqw8AfAC9Wy6+knL4DJPBv\nEXEwInZWZWNf9y8YU2OnJRYoK/nwo5l8PSLi5cA/Ar+Tmf8TsVA3u1UXKGt1/zPzDHBtRLwC+Cfg\n6oWqVT9npv8R8UvAM5l5MCJuPFu8QNWZ63uPGzLzeERcCuyLiP9aou7I/W/bSH8euLJneT1wvKa2\nTNOJiLgcoPr5TFU+c69HRPwo3cD/+8z8ZFVcTP/PyszngUfofrbxiog4O0Dr7eNL/a8e/wng2em2\ndGxuAN4eEU8Be+hO8XyYMvoOQGYer34+Q3eDfz0TWPfbFvoHgM3VJ/qrge3A3prbNA17gVuq+7fQ\nnes+W/5r1Sf5rwe+e3ZXsI2iO6T/W+CJzPyznodK6f+6aoRPRPwY8It0P9T8LPCOqlp//8++Lu8A\nHs5qgrdtMvP2zFyfmRvpvq8fzsxfpYC+A0TEj0fExWfvAzcBjzOJdb/uDy9G+LDjrcDX6M517qq7\nPRPo38eAbwE/oLs1fw/ducrPAF+vfl5S1Q26RzM9CTwGdOpu/wr7/gt0d1G/Cjxa3d5aUP9fB3y5\n6v/jwAeq8quALwFHgX8ALqzKL6qWj1aPX1V3H8b0OtwIfLqkvlf9/Ep1O3Q22yax7ntGriQVpG3T\nO5KkFTD0Jakghr4kFcTQl6SCGPqSVBBDX5IKYuhLUkEMfUkqyP8D6Fx/15tQq7MAAAAASUVORK
5C\nYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x111145518>"
+ "<matplotlib.figure.Figure at 0x11f4bb048>"
]
},
"metadata": {},
@@ -1107,9 +1105,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [mesa_dev]",
"language": "python",
- "name": "python3"
+ "name": "Python [mesa_dev]"
},
"language_info": {
"codemirror_mode": {
@@ -1121,9 +1119,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1+"
+ "version": "3.5.3"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
}
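
The notebook diff above replaces the single ``parameters`` dict with separate variable and fixed parameter dicts. As a hedged, self-contained sketch of that call pattern (the stripped-down ``MoneyModel`` below has no grid and only stands in for the tutorial's model; the ``BatchRunner``, ``RandomActivation``, ``run_all`` and ``get_model_vars_dataframe`` names are assumed from the mesa API of this era):

    import random

    from mesa import Agent, Model
    from mesa.time import RandomActivation
    from mesa.batchrunner import BatchRunner


    def compute_gini(model):
        # Sample Gini coefficient of agent wealth, the same reporter the tutorial collects.
        wealths = sorted(agent.wealth for agent in model.schedule.agents)
        n = len(wealths)
        cumulative = sum((i + 1) * w for i, w in enumerate(wealths))
        return (2 * cumulative) / (n * sum(wealths)) - (n + 1) / n


    class MoneyAgent(Agent):
        def __init__(self, unique_id, model):
            super().__init__(unique_id, model)
            self.wealth = 1

        def step(self):
            # Hand one unit of wealth to a random agent; grid movement is omitted here.
            if self.wealth > 0:
                other = random.choice(list(self.model.schedule.agents))
                other.wealth += 1
                self.wealth -= 1


    class MoneyModel(Model):
        def __init__(self, N, width, height):
            super().__init__()
            self.num_agents = N  # width/height accepted only to match fixed_params
            self.schedule = RandomActivation(self)
            for i in range(self.num_agents):
                self.schedule.add(MoneyAgent(i, self))
            self.running = True  # BatchRunner stops a run early if this becomes False

        def step(self):
            self.schedule.step()


    fixed_params = {"width": 10, "height": 10}
    variable_params = {"N": range(10, 500, 10)}

    batch_run = BatchRunner(MoneyModel,
                            variable_params,
                            fixed_params,
                            iterations=5,
                            max_steps=100,
                            model_reporters={"Gini": compute_gini})
    batch_run.run_all()

    # One row per (parameter combination, iteration), collected at the end of each run.
    run_data = batch_run.get_model_vars_dataframe()
    print(run_data.head())

The same two-dictionary split is what the RST change below documents.
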
diff --git a/docs/tutorials/intro_tutorial.rst b/docs/tutorials/intro_tutorial.rst
index 95de4112..9b3ff29c 100644
--- a/docs/tutorials/intro_tutorial.rst
+++ b/docs/tutorials/intro_tutorial.rst
@@ -804,16 +804,15 @@ indefinitely in ``__init__``.
self.running = True
# ...
-We instantiate a BatchRunner with a model class to run, and a dictionary
-mapping parameters to values for them to take. If any of these
-parameters are assigned more than one value, as a list or an iterator,
-the BatchRunner will know to run all the combinations of these values
-and the other ones. The BatchRunner also takes an argument for how many
-model instantiations to create and run at each combination of parameter
-values, and how many steps to run each instantiation for. Finally, like
-the DataCollector, it takes dictionaries of model- and agent-level
-reporters to collect. Unlike the DataCollector, it won't collect the
-data every step of the model, but only at the end of each run.
+We instantiate a BatchRunner with a model class to run, and two dictionaries:
+one of the fixed parameters (mapping model arguments to values) and one of
+varying parameters (mapping each parameter name to a sequence of values for it
+to take). The BatchRunner also takes an argument for how many model
+instantiations to create and run at each combination of parameter values, and
+how many steps to run each instantiation for. Finally, like the DataCollector,
+it takes dictionaries of model- and agent-level reporters to collect. Unlike
+the DataCollector, it won't collect the data every step of the model, but only
+at the end of each run.
In the following example, we hold the height and width fixed, and vary
the number of agents. We tell the BatchRunner to run 5 instantiations of
@@ -827,12 +826,13 @@ Now, we can set up and run the BatchRunner:
# run.py
from mesa.batchrunner import BatchRunner
- parameters = {"width": 10,
- "height": 10,
- "N": range(10, 500, 10)}
+ fixed_params = {"width": 10,
+ "height": 10}
+ variable_params = {"N": range(10, 500, 10)}
batch_run = BatchRunner(MoneyModel,
- parameters,
+ fixed_parameters=fixed_params,
+ variable_parameters=variable_params,
iterations=5,
max_steps=100,
model_reporters={"Gini": compute_gini})
diff --git a/examples/Schelling/analysis.ipynb b/examples/Schelling/analysis.ipynb
index 38f07ae9..f60068a9 100644
--- a/examples/Schelling/analysis.ipynb
+++ b/examples/Schelling/analysis.ipynb
@@ -17,10 +17,8 @@
},
{
"cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 1,
+ "metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
@@ -38,9 +36,9 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 2,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -56,10 +54,8 @@
},
{
"cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 3,
+ "metadata": {},
"outputs": [
{
"name": "stdout",
@@ -84,9 +80,9 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 4,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -95,15 +91,26 @@
},
{
"cell_type": "code",
- "execution_count": 6,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 5,
+ "metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
+ "<style>\n",
+ " .dataframe thead tr:only-child th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: left;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
@@ -114,7 +121,7 @@
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
- " <td>66</td>\n",
+ " <td>61</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
@@ -122,15 +129,15 @@
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
- " <td>67</td>\n",
+ " <td>69</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
- " <td>66</td>\n",
+ " <td>71</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
- " <td>66</td>\n",
+ " <td>71</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
@@ -138,14 +145,14 @@
],
"text/plain": [
" happy\n",
- "0 66\n",
+ "0 61\n",
"1 67\n",
- "2 67\n",
- "3 66\n",
- "4 66"
+ "2 69\n",
+ "3 71\n",
+ "4 71"
]
},
- "execution_count": 6,
+ "execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -163,26 +170,24 @@
},
{
"cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 6,
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10672dd30>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x110917ac8>"
]
},
- "execution_count": 7,
+ "execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW0AAAEACAYAAAB4ayemAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAFfRJREFUeJzt3X+w3XV95/HnOyRpSJAAI4SJgYTQhmWwpUnF2FbsUWCK\nbhvdHSsy21Z0+mOUMU7XUaDaep3udGS3jrLoTtdxZRmLdIstlXaYqgye7kwXiyGBxYCkhuVyiZAQ\nahJIQPLjvX+cQ3LvPefknFzu95zPuef5mMnc8/3c7znf93yT+8r7fr6/IjORJA2HeYMuQJLUO0Nb\nkoaIoS1JQ8TQlqQhYmhL0hAxtCVpiHQN7YhYExFbImJz8+veiNg46fsfjYgjEXFGtaVKkuZ3WyEz\ntwFrASJiHvAUcGdzeQVwBTBeYY2SpKYTnR65HNiemRPN5c8BH5vdkiRJnZxoaF8F3A4QERuAicx8\neNarkiS1Fb1exh4RC4AfARcC+4HvAFdk5vMR8f+AN2Tmc5VVKknqPqc9yduBBzJzd0S8HlgFPBQR\nAawAHoiIN2bmrslvighvbiJJM5CZMX3sRKZHrqY5NZKZ38/MszNzdWaeR+Pg5NrpgT1pw/5p/vnU\npz418BpK+eO+cH/M9X3xiU8kY2Mze28nPYV2RCymcRDybzqskkDL/wiSNMrGx2Hlytn9zJ6mRzLz\nAHDmcb6/etYqkqQ5oorQ9orIPqvVaoMuoRjui6ncH8fMlX1RRWj3fPbIjDcQkVVvQ5JKc+gQLFkC\nzz8PCxee+PsjgnyVByIlST3asQPOOmtmgX08hrYkVaCKqREwtCWpEoa2JA0RQ1uShoihLUlDxNCW\npCFSVWh7nrYkzbJMWLwYdu9unKs9E56nLUl9smsXnHLKzAP7eAxtSZplVU2NgKEtSbPO0JakIWJo\nS9IQMbQlaYgY2pI0RAxtSRoiTzxhaEvSUNizB44cgdNPr+bzuz4jMiLWAP+LYw/vXQ38EfBa4J3N\n8d3ANZn5VDVlStJweGVqJCp61HnX0M7MbcBagIiYBzwF3An8ODP/uDn+YWAM+J1qypSk4VDlfDb0\n+DT2SS4HtmfmxLTxJTS6bUkaaaWF9lXA7a8sRMR/An4bOACsn8W6JGkoVR3aPR+IjIgFwAbgjlfG\nMvOTmXkucAvw+dkvTxpuv/Zr8P3vD7oK9VNJnfbbgQcy89k23/sacHenN46NjR19XavVqNVqJ7BZ\naXjdfz9s3Qqvf/2gK1G/zDS06/U69Xq963o93087Im4H/iEzb20u/3Rm/rD5+sPAGzPzt9q8z/tp\nayQdONC4NeeNN8LHPz7oatQvy5bBli2wfPmr+5xO99PuqdOOiMU0DkL+3qThzzRPBzwMPA588NWV\nKM0tTz7Z+Do+Ptg61D8vvgh798LZZ1e3jZ5COzMPAGdOG3t3JRVJc8T4OMybZ2iPkiefhBUrGn/v\nVfGKSKki4+Owdq2hPUqqPggJhrZUmfFxeMtbGl89rDMaDG1piI2Pw8UXN17v2TPYWtQfhrY0xF75\nAV650imSUWFoS0PM0B49hrY0pA4ehGeeaZxJYGiPDkNbGlI7djQusliwwNAeFYcOwdNPN/6jrpKh\nLVVgfBxWrWq8NrRHw44dcNZZsHBhtdsxtKUKTP412dAeDf2YGgFDW6qEoT16DG1piE3+AV62DPbt\na9xASnOXoS0Nsck/wPPmwTnnHLuBlOYmQ1saYtN/gJ0imfsMbWlIHTkCExNw7rnHxgztuc/QlobU\nrl3wmtfA4sXHxgztuS2zMf1laEtDqF3HZWjPbbt2NZ5StGRJ9dsytKVZZmiPnn5NjYChLc06Q3v0\nGNrSEGv3A7xiReMGUgcPDqYmVauo0I6INRGxJSI2N7/ujYiNEfGfI+LRiHgwIv46Ik7tR8FS6Z54\novUHeMGCxkU2Tz01kJJUsXZ/51XpGtqZuS0z12bmOuAXgP3AncC3gIsy8+eBfwFuqLRSaUh06rqc\nIpm7iuq0p7kc2J6ZE5l5T2YeaY5/F6j4hoRS+TIN7VHUz9Cef4LrXwXc3mb8A8BfvvpypFa7d8PL\nLw+6it7s29f4etpprd9buRK2boUf/ai/Nal6RYZ2RCwANgDXTxv/BHAwM7/W6b1jY2NHX9dqNWq1\n2onWqRH15JNw/vlw5pmDrqR3b30rRLSOr18PH/wg/MVf9L8mVev88+GMM17dZ9Trder1etf1IjN7\n+sCI2AB8KDOvnDR2DfC7wNsy8ycd3pe9bkOabtMm+P3fhwceGHQlUn9FBJnZ8t//iUyPXM2kqZGI\nuBL4GPCWToEtvVp79sDSpYOuQipHTwciI2IxjYOQfzNp+GbgFODbzdMB/1sF9WnE7d3bfn5YGlU9\nddqZeQA4c9rYz1RSkTSJnbY0lVdEqmh22tJUhraKZqctTWVoq2h22tJUhraKZqctTWVoq2h22tJU\nhraKZqctTWVoq2h22tJUhraKZqctTWVoq2h22tJUPd8wasYb8IZRmqFMmD8fXnqp8eQXaZR0umGU\nnbaK9cILcPLJBrY0maGtYjmfLbUytFUs57OlVoa2irVnj6EtTWdoq1h79zo9Ik1naKtYdtpSK0Nb\nxbLTlloZ2iqWnbbUytBWsey0pVZdQzsi1kTElubDe7dExN6I2BgR746I70fE4YhY149iNVrstKVW\nXR/sm5nbgLUAETEPeAq4E1gM/Dvgv1dZoEaXnbbUqqensU9yObA9MydeGYiIlmvjpdlgpy21OtE5\n7auA26soRJrOTltq1XOnHRELgA3A9Se6kbGxsaOva7UatVrtRD9CI8hOW6OkXq9Tr9e7rtfzrVkj\nYgPwocy8ctr4d4CPZubmDu/z1qyakeXL4Xvfg9e9btCVSP03G7dmvZrOUyPOa2vW2WlLrXrqtCNi\nMTAOrM7M55tj7wJuBl4L7AEezMy3t3mvnbZO2Msvw5Ilja8e6tYo6tRp++QaFenZZ+HCC2H37kFX\nIg2GT67RUPHMEak9Q1tFcj5bas/QVpF81JjUnqGtIvmoMak9Q1tFstOW2jO0VSQ7bak9Q1tF8kCk\n1J6hrSJ5yp/UnqGtItlpS+0Z2iqSnbbUnqGtItlpS+0Z2iqSnbbUnqGtItlpS+0Z2iqSnbbUnrdm\nVXEyYcECeOklmH+ij56W5ghvzaqh8cILsGiRgS21Y2irOM5nS50Z2iqO89lSZ4a2imOnLXXWNbQj\nYk1EbImIzc2veyNiY0ScHhHfiojHIuKbEWFvpFlhpy111jW0M3NbZq7NzHXALwD7gTuB64F7MvMC\n4F7ghkor1ciw05Y6O9HpkcuB7Zk5AbwTuLU5fivwrtksTKPLTlvq7ERPqroK+Frz9bLM3AmQmc9E\nxFmzWpkG7r77YPfu/m/3n/8Zli/v/3alYdBzaEfEAmADcF1zaPoVMx2voBkbGzv6ularUavVei5Q\ng3P55VCrwbwBHK7+zd/s/zalQarX6
9Tr9a7r9XxFZERsAD6UmVc2lx8Fapm5MyLOBr6TmRe2eZ9X\nRA6hI0caF7ccPgzRck2WpKrNxhWRVwO3T1q+C7im+fp9wDdmXJ2Kc+AAnHyygS2VpqdOOyIWA+PA\n6sx8vjl2BvBXwDnN770nM/e0ea+d9hDatQsuugiefXbQlUijqVOn3dOcdmYeAM6cNvavNM4m0Rx0\n4AAsWTLoKiRN5xWRamv/fli8eNBVSJrO0FZbBw4Y2lKJDG215fSIVCZDW205PSKVydBWW06PSGUy\ntNWW0yNSmQxtteX0iFQmQ1ttOT0ilcnQVlv79zs9IpXI0FZbdtpSmQxttWVoS2UytNWW0yNSmQxt\ntWWnLZXJ0FZbhrZUJkNbbTk9IpXJ0FZbdtpSmQxttWVoS2UytNWW0yNSmQxttWWnLZWpp9COiKUR\ncUdEPBoRWyNifUT8XET8n4h4KCK+ERGnVF2s+se7/Ell6rXTvgm4OzMvBC4GfgB8Gfh4Zl4M3Al8\nvJoSNQje5U8qU2Tm8VeIOBXYkpnnTxv/cWae3ny9AvhmZl7U5v3ZbRsqy8GDsGgRHDoEEYOuRhpN\nEUFmtvwE9tJpnwfsjohbImJzRHwpIhYDWyNiQ3Od9wArZrFeDdCLLzamRgxsqTzze1xnHXBtZm6K\niM8D1wEfAG6OiD8C7gJe7vQBY2NjR1/XajVqtdqrKFlVc2pE6r96vU69Xu+6Xi/TI8uA+zJzdXP5\nzcB1mfnrk9b5GeCrmfmmNu93emTIbN8OV1wBjz8+6Eqk0TXj6ZHM3AlMRMSa5tBlwCMRcWbzg+cB\nnwT+fBbr1QB55ohUrl7PHtkI3BYRD9I4e+RPgasj4jHgEWBHZv7PakpUvzk9IpWrlzltMvMh4JJp\nw/+1+UdzjBfWSOXyiki18BJ2qVyGtlrYaUvlMrTVwtCWymVoq4XTI1K5DG21sNOWymVoq4WhLZXL\n0FYLp0ekchnaamGnLZXL0FYLQ1sql6GtFk6PSOUytNXCTlsql6GtFoa2VC5DWy2cHpHKZWirhZ22\nVC5DWy0MbalchrZaOD0ilcvQVgs7balchramyDS0pZIZ2pri5Zdh3jxYsGDQlUhqp6fQjoilEXFH\nRDwaEVsjYn1EXBIR90fElubXN1RdrKpnly2VracH+wI3AXdn5m9ExHxgCfC3wCcz81sR8XbgvwBv\nrahO9YmhLZWta2hHxKnApZl5DUBmHgL2RsTTwNLmaqcBO6oqUv3jmSNS2XrptM8DdkfELcDFwCbg\nI8D1wD9FxGeBAH6psirVN3baUtl6Ce35wDrg2szcFBGfA24AfhH4cGb+bUS8G/gKcEW7DxgbGzv6\nularUavVXmXZqsr+/Ya2NAj1ep16vd51vcjM468QsQy4LzNXN5ffTKPLvjQzl05ab+/k5Unj2W0b\nKse3vw033gj33DPoSqTRFhFkZkwf73r2SGbuBCYiYk1z6DJgK/DDiPiV5odfBmybxXo1IE6PSGXr\n9eyRjcBtEbEAeBx4P3AH8MWIWAi8BPxeNSWqnzwQKZWtp9DOzIeAS6YNbwLWz3pFGig7balsXhGp\nKQxtqWyGtqZwekQqm6GtKey0pbIZ2prC0JbKZmhrCqdHpLIZ2prCTlsqm6GtKQxtqWyGtqZwekQq\nm6GtKey0pbIZ2prC0JbKZmhrCqdHpLIZ2prCTlsqm6GtKQxtqWyGtqZwekQqm6GtozLhxRfh5JMH\nXYmkTgxtHfXii7BwIZx00qArkdSJoa2jDhxwakQqnaGtozwIKZWvp8eNRcRS4MvA64EjwAeAPwAu\nABI4HfhxZq6rqE71wf79hrZUul4f7HsTcHdm/kZEzAcWZ+Z7X/lmRPwZsKeKAtU/To9I5esa2hFx\nKnBpZl4DkJmHgH3TVnsP8NZZr0595fSIVL5e5rTPA3ZHxC0RsTkivhQRR08Ki4hLgWcyc3tlVaov\nnB6RytfL9Mh8YB1wbWZuiojPAzcAf9z8/tXA7d0+ZN8+uOEGOHiwsXzSSfAnfwKvfe2M6lYF7LSl\n8vUS2k8BE5m5qbn8deA6gIg4Cfj3NEK9o7GxMZ54Av7+7+F976txwQU1vvAFuP9+eMc7XkX1mlXP\nPANnnTXoKqTRVK/XqdfrXdfrGtqZuTMiJiJiTWZuAy4DHml++wrg0cz80fE+Y2xsjFtvhUOH4LOf\nbYxt2gTj413rUx+Nj8PKlYOuQhpNtVqNWq12dPnTn/502/V6PU97I3BbRDwIXAz8aXP8KnqYGoHW\nQFi5Ep54osetqy8Mbal8PZ3yl5kPAZe0GX9/rxsaH4f1648tr1zZmC5ROcbHYdWqQVch6Xj6dkVk\nu07b6ZGy2GlL5TO0BcBPfgLPPQfLlw+6EknH05fQPnIEJibg3HOPjS1fDrt3N8JCgzcx0fg78Q5/\nUtn6Eto7d8Kpp049B3j+/EZITEz0owJ149SINBz6EtqdAsEpknIY2tJwMLQFGNrSsDC0BRja0rAw\ntAUY2tKwMLQFGNrSsBhoaK9aZWiX4PBh2LEDzjln0JVI6magoX3OOY2wOHy4H1Wok6efhjPOgEWL\nBl2JpG76dkXkaae1ji1a1AiLp5/uVxVqx6kRaXj0JbRXroSIzt9zimSwDG1pePQttI/3PUN7sAxt\naXgY2jK0pSFiaMvQloaIoS1DWxoihvaIyzS0pWFSTGhn9qMSTffcc/BTP9W4da6k8vUU2hGxNCLu\niIhHI2JrRKxvjn+4OfZwRHym0/uXLev82aeeCgsXNsJD/WeXLQ2XXjvtm4C7M/NCGk9jfzQiasCv\nAz+bmT8L/FnHjXTZyihNkdTr9UGXMMUgQ7u0fTFo7o9j3BeddQ3tiDgVuDQzbwHIzEOZuQ/4IPCZ\nzDzUHN890yIM7cExtMvh/jjGfdFZL532ecDuiLglIjZHxJciYjGwBnhLRHw3Ir4TEW+YaRGjFNql\ncXpEGi7ze1xnHXBtZm6KiM8B1zfHT8/MN0XEJcBfAatnUsSqVfDFL8K9987k3cPlscfggQcGXcUx\nmzfDzTcPugpJvYrsctpGRCwD7svM1c3lN9MI7XnAjZn5j83xHwLrM/O5ae/3vBBJmoHMbLlrU9dO\nOzN3RsRERKzJzG3AZcBWYDvwNuAfI2INsGB6YHfaqCRpZnqZHgHYCNwWEQuAx4H3AweAr0TEw8BP\ngN+upkRJ0iu6To9IkspR2RWREXFlRPwgIrZFxHVVbadUEbEiIu5tXoz0cERsbI6fHhHfiojHIuKb\nEbF00LX2S0TMa56BdFdzeZT3RcsFayO+P25o7of/GxG3RcTCUd4fx1NJaEfEPOALwK8CFwFXR8S/\nqWJbBTsE/MfMvAj4ReDa5j64HrgnMy8A7gVuGGCN/fYR4JFJy6O8L6ZfsPYDRnR/RMRK4HeBtZn5\nczSmba9mRPdHN1V12m8E/iUzxzPzIPCXwDsr2laRMvOZzHyw+foF4FFgBY39cGtztVuBdw2mwv
6K\niBXAO4AvTxoe1X3R7oK1vYzo/gD2AS8DSyJiPnAysIPR3R/HVVVovw6YmLT8VHNsJEXEKuDnge8C\nyzJzJzSCHThrcJX11eeAjwGTD6KM6r7odMHaSO6PzPwx8FngSRphvTcz72FE90c3fXuw76iKiFOA\nrwMfaXbc04/8zvkjwRHxb4Gdzd88jncK6JzfF02vXLD2xcxcB+ynMRUwcv82ACJiNfAHwEpgOY2O\n+z8wovujm6pCewdw7qTlFc2xkdL8Ve/rwFcz8xvN4Z3NC5aIiLOBXYOqr49+GdgQEY8DtwNvi4iv\nAs+M4L6Axm+eE5m5qbn81zRCfBT/bQC8AfinzPzXzDwM3An8EqO7P46rqtD+HvDTEbEyIhYC7wXu\nqmhbJfsK8Ehm3jRp7C7gmubr9wHfmP6muSYz/zAzz21eVfte4N7M/C3g7xixfQGNC9aAieZFaXDs\ngrWR+7fR9BjwpohYFBFBY388wujuj+Oq7DztiLiSxhHyecD/yMyO99ueiyLil4H/DTxM49e6BP4Q\nuJ/GfVrOAcaB92TmnkHV2W8R8SvARzNzQ0ScwYjui4i4mMZB2ckXrJ3E6O6Pj9EI6MPAFuB3gNcw\novvjeLy4RpKGiAciJWmIGNqSNEQMbUkaIoa2JA0RQ1uShoihLUlDxNCWpCFiaEvSEPn/BnlhQzol\nDmkAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD8CAYAAABn919SAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGGRJREFUeJzt3X2MXfV95/H3Zx49YxvP2B7z4BnX2ATTlBADA8V0QxJc\nqtBSk91NW1BTuaQra6MVG6ImKVmkqJW2u2na3SzStrQWBPEHok1c00SVQoOyfZSNyWCcEDBOMgZ7\nBowZz4MNvvM83/3jnrHH47Hn3pl777m+9/OSLPuc+/D7+ej4Mz9/f79zjiICMzO79NWk3QEzMysM\nB7qZWYVwoJuZVQgHuplZhXCgm5lVCAe6mVmFcKCbmVUIB7qZWYVwoJuZVYi6Uja2evXqWL9+fSmb\nNDO75L300ksnIqJtvveVNNDXr19PV1dXKZs0M7vkSTqSy/tccjEzqxAOdDOzCuFANzOrEA50M7MK\n4UA3M6sQ8wa6pE2SDsz4dUrSQ8lrD0o6JOlVSV8rfnfNzOxC5l22GBGHgM0AkmqBt4BnJX0cuBe4\nISJGJa0pak/NzOyi8l2HvhXojogjkv4U+GpEjAJExLsF753xra4eegYyZ7Y3r2vhzusuT7FHZlau\n8g30+4Bnkj9fC3xE0h8DI8AXIuIHsz8gaQewA2DdunWL6Gr1eXtomC/u+hEAEkTAmuWNvPiIA93M\nzpfzpKikBmAb8K1kVx3QCtwGfBH4piTN/lxE7IyIzojobGub98pVm2Fvdz8A3/3cR3jjf/4av3/X\ntbz73igj45Mp98zMylE+q1zuBvZHxPFkuxfYHVkvAlPA6kJ3sJrt6e6ntbmeTZcvB6BjZTMAvYOZ\ni33MzKpUPoF+P2fLLQB/B9wJIOlaoAE4UbiuVbeIYG/3CbZsXEVNTfY/Pu2tTQD0DAyn2TUzK1M5\nBbqkZuAuYPeM3d8ANkj6MfDXwPaIiMJ3sTodHcjw9skRtmxYdWbf9Ai9xyN0M5tDTpOiEZEBVs3a\nNwZ8uhidsmy5BWDLxrNVrLZljTTU1Zyz6sXMbJqvFC1Te7r7WbO8kY1tS8/sq6kR7a1N9A665GJm\n53Ogl6Fs/byfLRtXMXvhUEdrs0suZjYnB3oZ+tm773Pi/VFu37jqvNc6VjZ5UtTM5uRAL0PT9fPb\nN56/CrS9tZmTw+OcGhkvdbfMrMw50MvQ3u5+1rY0nVnVMlNHa7LSxROjZjaLA73MTE0Few/3z1lu\ngWzJBbwW3czOV9KHRNv8Xjt2ipPD42y5UKC3+mpRs0vFwWOneOPE6TPbHa3NfKh9RdHac6CXmRcO\nT68/nzvQW5rrWdZY56WLZmVuYnKK3/qrvZwamTizr7Guhpe/chfNDcWJXpdcysye7n6uXr2UK1c0\nzfm6lF2L7hq6WXl75a2TnBqZ4Cv3fJB/eOgOvvYfb2B0YoquNweL1qYDvYxMTE7x4hsDF6yfT2v3\nWnSzsrc3+d/2ts1XsemK5dzz4Supq9GZVWzF4EAvI6+8dZL3RycuWG6ZNr0W3bfOMStfe7v72XT5\nclYvawSguaGOzR0tZ4K+GBzoZWT6J/dtG+YJ9NZmhscn6T89VopumVmeRicm+cGbA+cNzm7fuIpX\neoeKdh2JA72M7O3u57orzv5Ev5Azd110Hd2sLP2w5yQj41PnBfqWjauZCnjx8EBR2nWgl4nRiUm6\njgzMOzqHs2vRvdLFrDzt6T6BBLddfe6/5xvXtdBYV1O0sosDvUwcODrEyPjUvBOikJ0UBd8X3axc\n7e3u5/qrVrCiuf6c/Uvqa7n551qLNjHqQC8Te7r7qRH8Yg4j9GWNdbQ21/tqUbMyNDw2yctHhy64\nuOH2jas4eOwUA0WYA3Ogl4m9h/u5fu0KVjTVz/9msnV0Xy1qVn5eOjLI2OT59fNp0w+t2VeEssu8\ngS5pk6QDM36dkvTQjNe/ICkk+QHRC5T9iT54zuPm5tPR2uxJUbMytPfwCepqxC3rV875+g3tK2hu\nqC1K2WXeQI+IQxGxOSI2AzcDGeBZAEkdZJ81erTgPasiXUcGGJ+Medefz9S+som3hoaZnPJadLNy\nsqe7nxvaV7Csce7L++tra7j16pXs6T5R8LbzvaHAVqA7Io4k218HvgR8u6C9KqKTmXH6T4+m3Y1z\nfO/V4xf9iT6XjtZmxieDl44MsnpZQxF7Z2a5Gp2Y4ke9J/nsRzde9H23b1zF/zjUx/6jg7TkWGbN\nRb6Bfh/wDICkbcBbEfHD2Y9JK1dTU8HH/9c/FWUyYrFuWd/K0gv8RJ/LhtXZZ43+5l/tLVaXzGyB\nbr/m4v/b/qVrshXq//AXewrabs4JIqkB2AZ8WVIz8AjwKzl8bgewA2DdunUL7GZhHH9vhIHTY9x3\nS0de5Y1SuGlda17vv23DKnb+zs0Mj08WqUdmthDLGuvmnQ/7hatW8NRnbmUok9vg8pN/klvb+YzQ\n7wb2R8RxSR8CrgamR+ftwH5Jt0bEOzM/FBE7gZ0AnZ2dqRZ8py/E+cT1V/CxTWvS7Mqi1dSIX/mF\nK9Luhpkt0EevbSv4d+YT6PeTlFsi4hXgTCJKehPojIjCV/kLaHpVyFyPdjMzu9TltA49KbHcBewu\nbneKa/pCnLUtc99r3MzsUpbTCD0iMsAFi0IRsb5QHSqmnsEMa5Y3sqS+Nu2umJkVXFVdKdozkHG5\nxcwqVlUFeu/gMB2tLreYWWWqmkAfn5zi2Mlhj9DNrGJVTaAfGxphKrJXWJqZVaKqCfTpe4e3r3TJ\nxcwqU/UE+vQadI/QzaxCVU+gD2aorRFXrliSdlfMzIqiegJ9YJirWpZQV1s1f2UzqzJVk249gxna\nW1xuMbPKVTWB3js4TIcnRM2sglVFoI+MT9L33qgnRM2solVFoE8/TNkXFZlZJauKQJ++y6JLLmZW\nyaoj0Ae9Bt3MKl91BPpAhsa6GtqWN6bdFTOzoqmKQO8dHKa9tYlL5WHWZmYLURWB3jOYod3lFjOr\ncPM+sUjSJuBvZuzaAHwFWAv8OjAGdAMPRMRQMTq5WD0Dw2zuaEm7G2ZmRTXvCD0iDkXE5ojYDNwM\nZIBngeeB6yPiBuAnwJeL2tMFOjUyzsnhcU+ImlnFy7fkshXojogjEfG9iJhI9r8AtBe2a4Vx5i6L\nXoNuZhUup4dEz3Af8Mwc+z/DuWWZovjD77zK/qODeX3m/ZHszxyP0M2s0uUc6JIagG3MKq1IegSY\nAJ6+wOd2ADsA1q1bt+CODmXGeGrvm3xgzTLWtuR+gdCqpQ1sXtfCpiuWL7htM7NLQT4j9LuB/RFx\nfHqHpO3APcDWiIi5PhQRO4GdAJ2dnXO+Jxf73hggAv77Jz/ErVevXOjXmJlVrHwC/X5mlFskfQL4\nA+CjEZEpdMdm29vdz5L6Gq9WMTO7gJwmRSU1A3cBu2fs/r/AcuB5SQck/WUR+nfGnu4T3LJ+JQ11\nVbF03swsbzmN0JMR+KpZ+64pSo/m0
PfeKD85/j7//sayXEhjZlYWLonh7guH+wHYsnHVPO80M6te\nl0Sg7+nuZ3ljHddfdVnaXTEzK1uXRKDv7T7BL25Y6Qc8m5ldRNkn5NtDw7zZn2HLxtVpd8XMrKyV\nfaDv7U7q5xtcPzczu5iyD/Q93f20Ntdzna/0NDO7qLIO9Ihgb/cJtmxcRU2NH05hZnYx+d6cq+j2\ndJ/g2f1vATA+OcXbJ0f4rMstZmbzKrtA3/kvh9nzs35WL2sA4Jo1y9j685en3Cszs/JXdoHeM5Dh\nzuvW8Je/c3PaXTEzu6SUVQ09IugdHKZjZe63xzUzs6yyCvS+90YZnZjy04XMzBagrAK9ZzB5XJyf\nLmRmlreyCvTewWEAl1zMzBagrAJ9+oHO7R6hm5nlrcwCfZjVyxpZUl+bdlfMzC455RXogxmXW8zM\nFqj8At3lFjOzBZk30CVtSp4ZOv3rlKSHJK2U9Lyknya/ty6mIxOTU7w9NOIRupnZAs0b6BFxKCI2\nR8Rm4GYgAzwLPAx8PyI+AHw/2V6wYydHmJwKj9DNzBYo35LLVqA7Io4A9wJPJfufAj65mI6cXbLo\nQDczW4h8A/0+4Jnkz5dHxDGA5Pc1c31A0g5JXZK6+vr6LvjF0xcVtbe65GJmthA5B7qkBmAb8K18\nGoiInRHRGRGdbW1tF3xf70CGGsFVLQ50M7OFyGeEfjewPyKOJ9vHJV0JkPz+7mI60jM4zJUrmqj3\ng6DNzBYkn/S8n7PlFoDvANuTP28Hvr2YjvQMZFxuMTNbhJwCXVIzcBewe8burwJ3Sfpp8tpXF9OR\n7EVFnhA1M1uonB5wEREZYNWsff1kV70s2sj4JMdPjXrJopnZIpRFwfrtoeySRZdczMwWriwCvcdr\n0M3MFq08Aj25ba4v+zczW7jyCPTBDA21NVy+fEnaXTEzu2SVRaD3DgyztrWJmhql3RUzs0tWWQR6\nz6DXoJuZLVZ5BPpAxo+dMzNbpNQD/fToBIOZcU+ImpktUuqBPn2XRV9UZGa2OOkH+oDXoJuZFUIZ\nBPr0CN0lFzOzxUg/0AczNDfUsnJpQ9pdMTO7pKUf6APDdLQ2I3kNupnZYqQe6L1eg25mVhCpBnpE\n0Ds47AlRM7MCSDXQhzLjvD864RG6mVkBpBroZ9age4RuZrZouT6CrkXSLkmvSzooaYukzZJekHRA\nUpekW/Nt/MwadF9UZGa2aDk9gg54FHguIj4lqQFoBr4J/FFEfFfSrwJfAz6WT+NnR+guuZiZLda8\ngS7pMuAO4HcBImIMGJMUwGXJ21YAb+fbeM9AhpbmepYvqc/3o2ZmNksuI/QNQB/wpKQPAy8BnwMe\nAv5B0p+RLd3cnm/jvYPDnhA1MyuQXGrodcBNwGMRcSNwGngY+Czw+YjoAD4PPDHXhyXtSGrsXX19\nfee81jOYcf3czKxAcgn0XqA3IvYl27vIBvx2YHey71vAnJOiEbEzIjojorOtre3M/qkpr0E3Myuk\neQM9It4BeiRtSnZtBV4jWzP/aLLvTuCn+TTc9/4oYxNTvimXmVmB5LrK5UHg6WSFy2HgAeDbwKOS\n6oARYEc+DU/fZbHdI3Qzs4LIKdAj4gDQOWv3vwE3L7RhP9jCzKywUrtSdPqiIq9yMTMrjNQCvXcw\nQ9vyRpbU16bVBTOzipLqCN0TomZmhZNeoA9mvGTRzKyAUgn0ickpjp0c8YSomVkBpRLox06OMDkV\nvimXmVkBpRLo02vQPUI3MyucVAK9d3B6yaID3cysUFIJ9KHhMQBal/q2uWZmhZJKoGfGJgFobsj1\nzgNmZjafVAJ9eGyShroaamuURvNmZhUptRF6c4OvEDUzK6T0At2X/JuZFVQ6JZfxCZo8QjczK6jU\nauieEDUzK6zUSi4eoZuZFVZKJRdPipqZFVpOgS6pRdIuSa9LOihpS7L/QUmHJL0q6Wu5NupVLmZm\nhZdrIftR4LmI+FTyXNFmSR8H7gVuiIhRSWtybXR4bJKmetfQzcwKad5UlXQZcAfwuwARMQaMSfos\n8NWIGE32v5tro5mxCY/QzcwKLJeSywagD3hS0suSHpe0FLgW+IikfZL+WdItuTbqkouZWeHlEuh1\nwE3AYxFxI3AaeDjZ3wrcBnwR+Kak867ll7RDUpekrr6+PiangtGJKa9yMTMrsFwCvRfojYh9yfYu\nsgHfC+yOrBeBKWD17A9HxM6I6IyIzra2NobHp2/M5UA3MyukeQM9It4BeiRtSnZtBV4D/g64E0DS\ntUADcGK+78uMTQDQ5AuLzMwKKtdUfRB4Olnhchh4gGzp5RuSfgyMAdsjIub7ouHpW+f6Xi5mZgWV\nU6BHxAGgc46XPp1vg9P3QncN3cyssEp+pagD3cysOEoe6C65mJkVRwoj9OykqO+2aGZWWKUfoY+7\n5GJmVgyp1dC9Dt3MrLDSq6E70M3MCsolFzOzCpHKpGhtjWioTeXZGmZmFSuVGnpzfS1z3MfLzMwW\nIZUausstZmaFl84I3YFuZlZwqQS677RoZlZ4Kaxy8ePnzMyKwSUXM7MKkc6kqG/MZWZWcB6hm5lV\niJQmRR3oZmaFlkLJZYKmeq9yMTMrtJwCXVKLpF2SXpd0UNKWGa99QVJIWp3Ld2XGXXIxMyuGXIfK\njwLPRcSnkgdFNwNI6gDuAo7m8iUR2V8uuZiZFd68I3RJlwF3AE8ARMRYRAwlL38d+BIQuTQ2Gdm3\neYRuZlZ4uZRcNgB9wJOSXpb0uKSlkrYBb0XEDy/2YUk7JHVJ6jpxoh9woJuZFUMugV4H3AQ8FhE3\nAqeBPwQeAb4y34cjYmdEdEZEZ+vKlQC+9N/MrAhyCfReoDci9iXbu8gG/NXADyW9CbQD+yVdcbEv\nmppKSi6+sMjMrODmDfSIeAfokbQp2bUV2B8RayJifUSsJxv6NyXvvaAp19DNzIom19rHg8DTyQqX\nw8ADC2ksGaB7lYuZWRHkFOgRcQDovMjr63P5nrMjdNfQzcwKraRXirrkYmZWPCUO9OzvLrmYmRVe\naQN9yiN0M7NiSaXksqTOgW5mVmglL7k01ddSU6NSNmtmVhVKPkJ3ucXMrDhKHuhLfJWomVlRlHhS\n1BOiZmbF4pKLmVmFKGmgR4TXoJuZFUlJA30yfNm/mVmxeIRuZlYhSr4O3fdCNzMrjpJf+u9JUTOz\n4ij5Khc/fs7MrDhKW0PH69DNzIqlpIEODnQzs2LJKdAltUjaJel1SQclbZH0p8n2jyQ9K6kll+/y\nKhczs+LIdYT+KPBcRFwHfBg4CDwPXB8RNwA/Ab6cyxd5hG5mVhzzBrqky4A7gCcAImIsIoYi4nsR\nMZG87QWgPZcGm+o9KWpmVgy5jNA3AH3Ak5JelvS4pKWz3vMZ4Lu5NOgRuplZceQS6HXATcBjEXEj\ncBp4ePpFSY8AE8DTc31Y0g5JXZK6wIFuZlYsuQR6L9AbEfuS7V1kAx5J24F7gN+OSJ4vN0tE7I
yI\nzojoBE+KmpkVy7yBHhHvAD2SNiW7tgKvSfoE8AfAtojI5Npgky/9NzMrilxnKB8EnpbUABwGHgB+\nADQCz0sCeCEi/vN8X+S7LZqZFUdO6RoRB4DOWbuvWUiDLrmYmRWHrxQ1M6sQJQ10AfW1Jf8ZYmZW\nFUqarjXZWruZmRWBA93MrEKUNtBdbTEzK5rS1tA9QjczK5qSBnqt89zMrGg8QjczqxAlDfRljb5K\n1MysWEoa6G3LG0vZnJlZVfG6EzOzCuFANzOrEA50M7MK4UA3M6sQDnQzswrhQDczqxAOdDOzCuFA\nNzOrEIqI0jUmvQccKlmD5W81cCLtTpQJH4tz+Xic5WMBPxcRbfO9qdTX4h+KiNnPJq1akrp8PLJ8\nLM7l43GWj0XuXHIxM6sQDnQzswpR6kDfWeL2yp2Px1k+Fufy8TjLxyJHJZ0UNTOz4nHJxcysQpQk\n0CV9QtIhST+T9HAp2iwnkjok/aOkg5JelfS5ZP9KSc9L+mnye2vafS0VSbWSXpb098n21ZL2Jcfi\nbyQ1pN3HUpHUImmXpNeTc2RLlZ8bn0/+nfxY0jOSllTz+ZGPoge6pFrgz4G7gQ8C90v6YLHbLTMT\nwO9HxM8DtwH/JTkGDwPfj4gPAN9PtqvF54CDM7b/BPh6ciwGgd9LpVfpeBR4LiKuAz5M9rhU5bkh\naS3wX4HOiLgeqAXuo7rPj5yVYoR+K/CziDgcEWPAXwP3lqDdshERxyJif/Ln98j+g11L9jg8lbzt\nKeCT6fSwtCS1A78GPJ5sC7gT2JW8pZqOxWXAHcATABExFhFDVOm5kagDmiTVAc3AMar0/MhXKQJ9\nLdAzY7s32VeVJK0HbgT2AZdHxDHIhj6wJr2eldT/Ab4ETCXbq4ChiJhItqvpHNkA9AFPJiWoxyUt\npUrPjYh4C/gz4CjZID8JvET1nh95KUWga459Vbm0RtIy4G+BhyLiVNr9SYOke4B3I+KlmbvneGu1\nnCN1wE3AYxFxI3CaKimvzCWZK7gXuBq4ClhKtlw7W7WcH3kpRaD3Ah0zttuBt0vQblmRVE82zJ+O\niN3J7uOSrkxevxJ4N63+ldAvAdskvUm2/HYn2RF7S/JfbKiuc6QX6I2Ifcn2LrIBX43nBsAvA29E\nRF9EjAO7gdup3vMjL6UI9B8AH0hmqRvITnB8pwTtlo2kRvwEcDAi/veMl74DbE/+vB34dqn7VmoR\n8eWIaI+I9WTPhf8XEb8N/CPwqeRtVXEsACLiHaBH0qZk11bgNarw3EgcBW6T1Jz8u5k+HlV5fuSr\nJBcWSfpVsqOwWuAbEfHHRW+0jEj6d8C/Aq9wtm7838jW0b8JrCN7Iv9GRAyk0skUSPoY8IWIuEfS\nBrIj9pXAy8CnI2I0zf6ViqTNZCeIG4DDwANkB1tVeW5I+iPgt8iuDnsZ+E9ka+ZVeX7kw1eKmplV\nCF8pamZWIRzoZmYVwoFuZlYhHOhmZhXCgW5mViEc6GZmFcKBbmZWIRzoZmYV4v8Dy+yBMELBv9QA\nAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x1066d15f8>"
+ "<matplotlib.figure.Figure at 0x110917400>"
]
},
"metadata": {},
@@ -202,9 +207,9 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -213,15 +218,26 @@
},
{
"cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 8,
+ "metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
+ "<style>\n",
+ " .dataframe thead tr:only-child th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: left;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
@@ -241,28 +257,28 @@
" <tr>\n",
" <th rowspan=\"5\" valign=\"top\">0</th>\n",
" <th>(0, 0)</th>\n",
- " <td>1</td>\n",
+ " <td>2</td>\n",
" <td>5</td>\n",
" </tr>\n",
" <tr>\n",
- " <th>(0, 1)</th>\n",
- " <td>4</td>\n",
- " <td>7</td>\n",
- " </tr>\n",
- " <tr>\n",
" <th>(0, 2)</th>\n",
- " <td>6</td>\n",
- " <td>0</td>\n",
+ " <td>8</td>\n",
+ " <td>4</td>\n",
" </tr>\n",
" <tr>\n",
" <th>(0, 3)</th>\n",
- " <td>2</td>\n",
- " <td>1</td>\n",
+ " <td>5</td>\n",
+ " <td>4</td>\n",
" </tr>\n",
" <tr>\n",
" <th>(0, 4)</th>\n",
- " <td>2</td>\n",
- " <td>2</td>\n",
+ " <td>4</td>\n",
+ " <td>9</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>(0, 5)</th>\n",
+ " <td>1</td>\n",
+ " <td>0</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
@@ -271,14 +287,14 @@
"text/plain": [
" x y\n",
"Step AgentID \n",
- "0 (0, 0) 1 5\n",
- " (0, 1) 4 7\n",
- " (0, 2) 6 0\n",
- " (0, 3) 2 1\n",
- " (0, 4) 2 2"
+ "0 (0, 0) 2 5\n",
+ " (0, 2) 8 4\n",
+ " (0, 3) 5 4\n",
+ " (0, 4) 4 9\n",
+ " (0, 5) 1 0"
]
},
- "execution_count": 9,
+ "execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -300,10 +316,8 @@
},
{
"cell_type": "code",
- "execution_count": 10,
- "metadata": {
- "collapsed": true
- },
+ "execution_count": 9,
+ "metadata": {},
"outputs": [],
"source": [
"from mesa.batchrunner import BatchRunner"
@@ -311,7 +325,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 10,
"metadata": {
"collapsed": true
},
@@ -342,21 +356,21 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 11,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
- "parameters = {\"height\": 10, \"width\": 10, \"density\": 0.8, \"minority_pc\": 0.2, \n",
- " \"homophily\": range(1,9)}"
+ "fixed_params = {\"height\": 10, \"width\": 10, \"density\": 0.8, \"minority_pc\": 0.2} \n",
+ "variable_parms = {\"homophily\": range(1,9)}"
]
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 12,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -365,33 +379,41 @@
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 13,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
- "param_sweep = BatchRunner(SchellingModel, parameters, iterations=10, \n",
+ "param_sweep = BatchRunner(SchellingModel, \n",
+ " variable_parameters=variable_parms, fixed_parameters=fixed_params,\n",
+ " iterations=10, \n",
" max_steps=200,\n",
" model_reporters=model_reporters)"
]
},
{
"cell_type": "code",
- "execution_count": 15,
- "metadata": {
- "collapsed": false
- },
- "outputs": [],
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "80it [00:02, 27.26it/s] \n"
+ ]
+ }
+ ],
"source": [
"param_sweep.run_all()"
]
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 15,
"metadata": {
- "collapsed": false
+ "collapsed": true
},
"outputs": [],
"source": [
@@ -400,16 +422,14 @@
},
{
"cell_type": "code",
- "execution_count": 17,
- "metadata": {
- "collapsed": false
- },
+ "execution_count": 16,
+ "metadata": {},
"outputs": [
{
"data": {
- "image/png": "<base64-encoded matplotlib PNG output omitted>",
+ "image/png": "<base64-encoded matplotlib PNG output omitted>",
"text/plain": [
- "<matplotlib.figure.Figure at 0x1082ba400>"
+ "<matplotlib.figure.Figure at 0x112f4d128>"
]
},
"metadata": {},
@@ -424,9 +444,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [mesa_dev]",
"language": "python",
- "name": "python3"
+ "name": "Python [mesa_dev]"
},
"language_info": {
"codemirror_mode": {
@@ -438,9 +458,13 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.5.1"
+ "version": "3.5.3"
+ },
+ "widgets": {
+ "state": {},
+ "version": "1.1.2"
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
}
diff --git a/examples/forest_fire/Forest Fire Model.ipynb b/examples/forest_fire/Forest Fire Model.ipynb
index 59e4d847..56332b83 100644
--- a/examples/forest_fire/Forest Fire Model.ipynb
+++ b/examples/forest_fire/Forest Fire Model.ipynb
@@ -24,9 +24,7 @@
{
"cell_type": "code",
"execution_count": 1,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"import random\n",
@@ -59,9 +57,7 @@
{
"cell_type": "code",
"execution_count": 2,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"class TreeCell(Agent):\n",
@@ -117,9 +113,7 @@
{
"cell_type": "code",
"execution_count": 3,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"class ForestFire(Model):\n",
@@ -193,9 +187,7 @@
{
"cell_type": "code",
"execution_count": 4,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"fire = ForestFire(100, 100, 0.6)"
@@ -211,9 +203,7 @@
{
"cell_type": "code",
"execution_count": 5,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"fire.run_model()"
@@ -227,17 +217,13 @@
"\n",
"But... so what? This code doesn't include a visualization, after all. \n",
"\n",
- "**TODO: Add a MatPlotLib visualization**\n",
- "\n",
"Remember the data collector? Now we can put the data it collected into a pandas DataFrame:"
]
},
{
"cell_type": "code",
"execution_count": 6,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"results = fire.dc.get_model_vars_dataframe()"
@@ -253,14 +239,12 @@
{
"cell_type": "code",
"execution_count": 7,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10a022710>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x119756a90>"
]
},
"execution_count": 7,
@@ -269,9 +253,9 @@
},
{
"data": {
- "image/png": "<base64-encoded matplotlib PNG output omitted>",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xd4HNW5+PHv2V3trnqXXCTZsi33btnGphkbbIqpAWIg\nwZRcSAPSCHCTeyGEBLjkpkAIhPwgQK6DARNKiKm2iQEbcO+9y5LVe1ltOb8/zkiWwbZkWdIWvZ/n\nmWe1Z2dnz+ys5p1T5hyltUYIIUTvYwt2BoQQQgSHBAAhhOilJAAIIUQvJQFACCF6KQkAQgjRS0kA\nEEKIXkoCgBBC9FIdCgBKqSSl1CKl1Hal1Dal1DSlVIpS6gOl1C7rMdlaVymlHldK7VZKbVRKTWyz\nnfnW+ruUUvO7a6eEEEK0r6MlgD8A72qthwPjgG3AvcASrXUesMR6DnARkGcttwFPASilUoD7ganA\nFOD+lqAhhBCi56n27gRWSiUAG4BBus3KSqkdwAytdZFSqi/wkdZ6mFLqz9bfL7Vdr2XRWt9upR+z\n3vGkpaXpgQMHnsbuCSFE77NmzZoyrXV6e+s5OrCtQUAp8Fel1DhgDXAXkKm1LgKwgkCGtX5/4FCb\n9xdYaSdKP6GBAweyevXqDmRRCCFEC6XUgY6s15EqIAcwEXhKaz0BqOdodc9xP/s4afok6ce+Wanb\nlFKrlVKrS0tLO5A9IYQQndGRAFAAFGitP7eeL8IEhGKr6gfrsaTN+tlt3p8FFJ4k/Rha62e01vla\n6/z09HZLMEIIITqp3QCgtT4CHFJKDbOSZgFbgbeAlp4884E3rb/fAm60egOdAVRbVUXvAbOVUslW\n4+9sK00IIUQQdKQNAOAOYIFSygnsBW7GBI9XlFK3AgeBa6x1FwMXA7uBBmtdtNYVSqlfAqus9R7U\nWld0yV4IIUKC1+uloKCApqamYGelV3C73WRlZREVFdWp97fbCyiY8vPztTQCCxE+9u3bR3x8PKmp\nqSh1vGY/0VW01pSXl1NbW0tubu4xryml1mit89vbhtwJLIToMk1NTXLy7yFKKVJTU0+rtCUBQAjR\npeTk33NO97sO7QBQcxgCgWDnQgghIlJoB4C6Evj86WDnQggRRux2O+PHj2fcuHFMnDiRFStW9Hge\nbrrpJhYtWvSVdK01Dz30EHl5eQwdOpTzzjuPLVu2tLu9N954g61bt3Z5PkM7ALgT4YP/hsL1wc6J\nECJMREdHs379ejZs2MDDDz/Mfffdd0rv9/v93ZQzePLJJ1mxYgUbNmxg586d3HfffVx22WXt1uP3\nzgCQlAOx6bDoFvDUBTs3QogwU1NTQ3KyGXPyo48+Yu7cua2vff/73+f5558HzLAzDz74IGeddRav\nvvoqM2bM4J577mHKlCkMHTqUjz/+GDDB4e6772by5MmMHTuWP//5z4C5sv/+97/PyJEjueSSSygp\nKeF4Hn30UZ544gliYmIAmD17NtOnT2fBggUAxMXFta67aNEibrrpJlasWMFbb73F3Xffzfjx49mz\nZ0+XfT8dvQ8gOGwOuOoJeOFSeOencMWfgp0jIUQH/eKfW9haWNOl2xzZL4H7Lx110nUaGxsZP348\nTU1NFBUVsXTp0g5t2+1288knnwDw9NNP4/P5+OKLL1i8eDG/+MUv+PDDD3n22WdJTExk1apVeDwe\nzjzzTGbPns26devYsWMHmzZtori4mJEjR3LLLbccs/2amhrq6+sZPHjwMen5+fknrQaaPn06l112\nGXPnzuXqq6/u0L50VGgHAIDcs+Gcn8Dyx2DI+TD6qmDnSAgRwlqqgABWrlzJjTfeyObNm9t939e/\n/vVjnl91lTnXTJo0if379wPw/vvvs3Hjxtb6/erqanbt2sXy5cu57rrrsNvt9OvXj5kzZ3Y4v1rr\noPWcCv0AAHDuPbBnGfzzB5A1GZKy23+PECKo2rtS7wnTpk2jrKyM0tJSHA4HgTa9Cr9c7x4bG3vM\nc5fLBZhGZZ/PB5iT9RNPPMGcOXOOWXfx4sXtnsQTEhKIjY1l7969DBo0qDV97dq1nHvuucCx3Tp7\n4m7q0G4DaGGPgq/9BbQfXr8dAt3XSCOEiBzbt2/H7/eTmprKgAED2Lp1Kx6Ph+rqapYsWXLK25sz\nZw5PPfUUXq8XgJ07d1JfX88555zDwoUL8fv9FBUVsWzZsuO+/+677+bOO++ksbERgA8//JBPPvmE\n66+/HoDMzEy2bdtGIBDg9ddfb31ffHw8tbW1p5zf9oRHCQAgZRBc/Bi88R349Pdw9o+DnSMhRAhq\naQMAc8X+wgsvYLfbyc7O5tprr2Xs2LHk5eUxYcKEU972t771Lfbv38/EiRPRWpOens4bb7zBlVde\nydKlSxkzZgxDhw5tvaL/sjvuuIPKykrGjBmD3W6nT58+vPnmm0RHRwPwyCOPMHfuXLKzsxk9ejR1\ndabzy7x58/iP//gPHn/8cRYtWvSVdoTOCq+xgLSGV2+C7f+C25ZBnzFBy5sQ4qu2bdvGiBEjgp2N\nXuV433lkjgWkFFzyW4hOhte/DT5PsHMkhBBhK7wCAEBsKlz2BBRvho8eCXZuhBAibIVfAAAYdiFM\n+IZpC9jTsT6+QgghjhWeAQBgzsOQPgJe/iYUbQh2boQQIuyEbwBwJ8A3FoE7CRZcA5X7g50jIYQI\nK+EbAAAS+sE3XgNfkwkCTV1727kQQkSy8A4AABnD4esLoHwP/PNO01VUCNFrtQwH3bLs37+f1atX\nc+eddwY7ayEnfG4EO5ncs2HWf8GHD0DONJh6e7BzJIQIkrZjAbUYOHAg+fntdovvdcK/BNBi+l0w\n9EJ472dQIBPJCyGOajsU9AMPPMAtt9zCjBkzGDRoEI8//njrev/3f//HlClTGD9+PLfffnu3zg0Q\nCiKjBABgs8EVT8Gfz4V//Ad8+1NwxgQ7V0L0Xu/cC0c2de02+4yBi05+/0/boSByc3OPGVOnxfbt\n21m2bBm1tbUMGzaM73znO+zevZuXX36ZTz/9lKioKL773e+yYMECbrzxxq7dhxASOQEAICYFLv8j\nvHgZLPsVzPlVsHMkhOhhx6sC+rJLLrkEl8uFy+UiIyOD4uJilixZwpo1a5g8eTJgAklGRkZPZDlo\nIisAAAw6FybdDJ/9CUZdCVlS7ydEULRzpR5MLUM9w9HhnrXWzJ8/n4cffjiIOetZkdMG0NYFD0J8\nX3jzezJekBCiQ2bNmsWiRYtap3OsqKjgwIEDQc5V94rMAOBOgEsfh9Lt8O690jVUCNGukSNH8tBD\nDzF79mzGjh3LBRdcQFFRUbCz1a06NBy0Umo/UAv4AZ/WOl8plQK8DAwE9gPXaq0rlZnS5g/AxUAD\ncJPWeq21nfnAz63NPqS1fuFkn/uV4aBP1Qf3m/GC5jwM077b+e0IITpEhoPueT01HPR5WuvxbTZ6\nL7BEa50HLLGeA1wE5FnLbcBTVoZSg
PuBqcAU4H6lVPIpfP6pm3U/jLgM3vtP2L64Wz9KCCHCzelU\nAV0OtFzBvwBc0Sb9RW18BiQppfoCc4APtNYVWutK4APgwtP4/PbZbHDln6HfBHjtVijd2a0fJ4QQ\n4aSjAUAD7yul1iilbrPSMrXWRQDWY0t/qf7AoTbvLbDSTpR+DKXUbUqp1Uqp1aWlpR3fkxNxxsC8\nv4PDbe4P8DWf/jaFECICdDQAnKm1noip3vmeUuqck6yrjpOmT5J+bILWz2it87XW+enp6R3MXjsS\n+sJlj0PRevj3o12zTSGECHMdCgBa60LrsQR4HVOHX2xV7WA9llirFwDZbd6eBRSeJL1njLjUTCLz\nyW/h4Gc99rFCCBGq2g0ASqlYpVR8y9/AbGAz8BYw31ptPvCm9fdbwI3KOAOotqqI3gNmK6WSrcbf\n2VZaz7nwEUjKgVfmQ8n2Hv1oIYQINR0pAWQCnyilNgBfAP/SWr8LPAJcoJTaBVxgPQdYDOwFdgN/\nAb4LoLWuAH4JrLKWB620nuOKh3kvARr+ehEcXtOjHy+E6H4FBQVcfvnl5OXlMXjwYO666y6am0+t\n7W/GjBkMGzasdUjpRYsWATB9+vTuyHLQdOg+gGA57fsATqRiL7x4BTSUww2vwoDIOqhCBEuw7wPQ\nWjN16lS+853vcPPNN+P3+7nttttISUnhscce6/B2ZsyYwW9+85sODSHt9/ux2+2nk+3T0lP3AUSO\nlEFwy3tmuIiF18t0kkJEiKVLl+J2u7n55psBM87P7373O5577jkaGhp4/vnnueqqq7jwwgvJy8vj\npz/96SltPy4uDjDDS5933nlcf/31jBkzBgjPoaQjbzC4jkroC9e/DH85DxbeALe+D87YYOdKiIjx\n6BePsr2ia9vahqcM554p95zw9S1btjBp0qRj0hISEsjJyWH37t0ArF+/nnXr1uFyuRg2bBh33HEH\n2dnZX9nWDTfcQHR0NABLliwhNTX1mNe/+OILNm/eTG5uLtu2bQvLoaR7bwAASB0MVz9n5hN+47tw\nzfOgjtdbVQgRDrTWqOP8D7dNnzVrFomJiYAZ/+fAgQPHDQALFiw4aRXQlClTyM3NBQjboaR7dwAA\nGHI+nP8AfPDfsPo5mHxrsHMkREQ42ZV6dxk1ahSvvfbaMWk1NTUcOnSIwYMHs2bNmuMOBd0ZsbFH\nawzCdSjp3tkG8GXT74Tcc8ycwjWRPfqfEJFs1qxZNDQ08OKLLwKmgfbHP/4xN910EzEx3TdDYLgO\nJS0BAEy1z9zfg78Z3jm1RiEhROhQSvH666/z6quvkpeXx9ChQ3G73fz617/u1s8N16Gke2c30BP5\n+H9hyYNm7KDhl/Tc5woRIYLdDbQ3km6gXWX6nZAxCv71E2iqDnZuhBCiW0kAaMseBZc/AXVH4L2f\nBTs3QgjRrSQAfFn/SXDmXbDub7Drw2DnRoiwE8rVypHmdL9rCQDHc+69kD4c/nmnVAUJcQrcbjfl\n5eUSBHqA1pry8nLcbnentyH3ARxPlBsu/xM8ez68cw9c+XSwcyREWMjKyqKgoIAumcxJtMvtdpOV\nldXp90sAOJGsSXD2T2D5/0DONJg0v/33CNHLRUVFtd4dK0KfVAGdzIx7YdB5sPhuKFwX7NwIIUSX\nkgBwMjY7fO1ZiE2HV26Ehp6dvkAIIbqTBID2xKbCtS+aISIW/yTYuRFCiC4jAaAjsibBOXfD5tdg\nZ8/OYimEEN1FAkBHnfVDSB8Bb/8IPLXBzo0QQpw2CQAd5XDCZY9DzWFY+lCwcyOEEKdNAsCpyJ4C\nU/4DPv8zbH0r2LkRQojTIgHgVJ3/AGRNhkW3SHuAECKsSQA4Vc5Y+MYi6DMaXv4m7Fka7BwJIUSn\nyJ3AneFOhG/8A164FP4+zwwVMfqqYOdKCBEBtNYU13jYW1rHgYoGqhq81DR5qW70UtXQTEV9Mw3N\nfpp9Abz+AL6Axh/QBAIav9b4Ax3/LAkAnRWTAvP/CS9dB4tuhqqDZhRRmVReCNFBPn+AHcW1rD9U\nxebDNew4UsPO4jrqPMfOU+ywKRKjo0iKiSI5xklKrJMouw2n3YbDrrArhc129HFNBz9fAsDpiEmB\nG9+EN74NH94PJVvhov+B6KRg50wIEWIq6pvZfqSG7UW17DhSy/biWnYeqaXR6wcgMTqKYX3iuWpi\nf/Iy4hiUHseA1BhSY124o2yoU7i4fKSD63U4ACil7MBq4LDWeq5SKhdYCKQAa4Fvaq2blVIu4EVg\nElAOfF1rvd/axn3ArYAfuFNrHf6tqFFu+NpzkDYMlj8G+5bDpY/D0NnBzpkQooc1ef0crmrkYHkD\n+8vr2VdWz67iOnaV1FFW52ldLyXWybDMeOZNyWZ8dhITspPJTok+pZN8VziVEsBdwDYgwXr+KPA7\nrfVCpdTTmBP7U9ZjpdZ6iFJqnrXe15VSI4F5wCigH/ChUmqo1trfRfsSPDYbnHcfDJ0Db3wX/n4N\nTPgGzHkY3Antv18IEXaavH7WHqzksz3lfL6vgv3l9RTXeI5ZJ87lYEhGHOcNS2doZjzD+8YzrE88\n6XGuHj/ZH0+HAoBSKgu4BPgV8CNlcj4TuN5a5QXgAUwAuNz6G2AR8Edr/cuBhVprD7BPKbUbmAKs\n7JI9CQX9J8Lt/4aPHoFPfw97l8MVT0LuOcHOmRDiNDT7AuwvN1fzmwurWbWvgo0F1TT7A9gUjO6f\nyDl56WQlx5CVHM3AtBhyUmJJi3OGxIn+RDpaAvg98FMg3nqeClRprVtaKgqA/tbf/YFDAFprn1Kq\n2lq/P/BZm222fU/kcLjg/Pth2MXw+u3wwmVw3n+auQVs0utWiFBXXNPExoJqNh2uZueRWnaV1LK/\nvAF/wMxy5rApxmQlcvOZA5mSm8Lk3BQS3FFBznXntBsAlFJzgRKt9Rql1IyW5OOsqtt57WTvaft5\ntwG3AeTk5LSXvdCVPRm+/Qm8/UNY9isoWA1X/Rmik4OdMyGEpbrBy8q95Xyxr4LtR2rYcaSW8vpm\nAGwKBqbFkpcRx4Wj+zA0M568jHgGpcfijrIHOeddoyMlgDOBy5RSFwNuTBvA74EkpZTDKgVkAYXW\n+gVANlCglHIAiUBFm/QWbd/TSmv9DPAMQH5+fnhPLOqMMfcIZOXDu/fBU2fB3N9JA7EQQRIIaDYd\nrmbp9hI+2lnKpoIqAhrcUTaGZcYza0QGI/omMDYrkRF9E4hxRnZHSXUqkzdbJYCfWL2AXgVea9MI\nvFFr/Sel1PeAMVrrb1uNwFdpra9VSo0C/o6p9+8HLAHyTtYInJ+fr1evXt35vQslBWvgze9C6XYY\ncw1c+KiZa0AI0a38Ac36Q5X8c0MRizcVUVLrQSkYn53EOXnpnJWXxrisJJyOyKmiVUqt0Vrnt7fe\n6YS3e4CFSqmHgHXAs1b6s8DfrEbeCkzPH7TWW5RSrwBbAR/wvYjoAdRRWZPg9uXwye9g+W9g/y
dw\n1TPSQCxEF9Nas/5QFR9uK2bdwSo2FlRT5/HhdNg4b1g6F43uyzlD00mJdQY7q0F3SiWAnhZRJYC2\njmyCV2+G8t1wzk/g3HvBHtlFTSG624HyehZ8fpC3NxRSWN2E3aYY0TeeCdnJ5A9MZubwDOLDtLH2\nVPVECUB0Vp8xprvo4p+am8f2fwpf+3+QGHmdooToTs2+AB/vKuXvnx9k6Y4S7EoxY1g6P5kzjFkj\nMkmM7h0n/M6SABAszlhzj8Cgc+GfP4CnzzJVQnkXBDtnQoQ0rz/Aij3lvL2hkPe2HKGmyUdanJM7\nZuZxw9QcMhPcwc5i2JAAEGxjr4V+E+HVm2DBNXDez+DsH8s9A0K0UdXQzMo95SzfVcq7m49Q2eAl\nzuVg9shMLh3XjzOHpEVUI25PkQAQCtKGwLc+MCWBZQ/BkQ1wxVPgim//vUJEINM/v4zP91Wwan8F\nWwpr0BpinXZmjshk7ti+nDs0PWL64weLBIBQERVt7hnoOxbe/zn8YTzk3wz5t0JC32DnTohu1+T1\ns2hNAe9uPsJne8vxBTQuh40JOUncOTOPs/PSGJedRJRdrvS7ivQCCkWHvjDdRXe8Aza76So6fC4M\nvwTi+wQ7d0J0uY0FVfzolQ3sLqkjNy2W2aMyOX9EJmOzEnE55Cr/VHW0F5AEgFBWsQ/WPA/b3oKK\nvaBsMPIKmH6HGXhOiDDX5PXzp2W7efKjPaTHuXjka2M4d2h6SA+gFg4kAEQSrc0dxOsXwJoXwFMD\n2WfA+Oth1BVmikohwsxHO0q4/60tHChv4MoJ/Xng0lEkxki3za4gASBSNdXA2hdh7QtQthPsLkgf\nBrHpEJcBidmQlAMJ/cCVAK44EyCiU8zkNUIEUVVDM+9tOcKb6wtZsaecQWmx/OLyUZydlx7srEUU\nCQCRTmsoXAubXjN3FNeXQl0x1BaBPsGs0FGxEJMKMckmIMSkmmkt4zIgORdSBkHyQDNiqRTBRRcp\nqWni/a3FvL+1mJV7yvD6NTkpMVw3JYdbzhoodfzdQO4EjnRKQf9JZmnL1ww1h00g8NRBcy00VUND\nhVkarceGcqg6YP5uqjp2G854EwgGTIe82TDwLCk9iFNSWNXIu5uP8M7mIlYfqERrGJAawy1n5nLJ\n2L6M6Z8o9fwhQAJApHE4ISXXLB3VXA+V+02jc9UBqDpoqpfWvghf/BmiYmDQeTDsQhhygXRLFcdV\nUNnAO5uOsHhzEesOmouK4X3i+cGsodZ4+nFy0g8xEgCEGZYic5RZ2vI2mlFLd74LO96FHf8y6al5\nkHu2mfVs0AywS8Ndb3WwvIEPthXz9sbC1pP+qH4J3D1nGBeN7sOg9Lgg51CcjLQBiI7R2oxiuvcj\n2P8xHFhpqpdi0mDk5TD4PMiZBrFpwc6p6EYNzT4+31fBp7vKWLajhD2l9QCM7JvA3HF9mTumHzmp\nMUHOpZBGYNG9fB7YvQQ2vWJKB75Gk56aB/0mmCX3HOgzOrj5FKclENBsKaxh+a5Slu8sZe3BSrx+\njdNuY+qgFM4blsF5wzPITYsNdlZFG9IILLqXwwXDLzaLzwOF6+HAp1CwylQbbXrFrJcxEsZcDeOu\nM11TRUgLBDQ7S2pZuaecz/aW8/m+CqoavICp2rnlrFzOGpJG/oAUop3SeyfcSQlAdI+aItj+Nmxa\nBIc+A2WHoRfCpJtMu4FDZmMKFfUeH8t2lPDu5iOs2FNOhTUpenZKNNMGpTJtcCpnDUknPd4V5JyK\njpIqIBE6KvaaHkXr/s/cr+BKhKFzzNwHWfnmHgTpHdJjSmqbWLG7nC2F1WwprGHNgUo8vgBpcS7O\nGZrWetLPSpa6/HAlAUCEHl8z7FkC296GHYvNPQlgbkjrPwn655u5k/uOl8bkLra3tI4PtxXz3pZi\n1h40/fKdDhvD+8QzMSeZi0b3IX9gCnabBOJIIG0AIvQ4nDDsIrP4fVCyFQ6vhoI1cHgN7PoAsC5I\n4vuaQDDwTBh4tplG0yZ1zh1V3ehl1b4KVu4tZ9n2EvaWHe2t84NZQ5k1IoPhfeJxyNDKvZoEABEc\ndoeZ+6DvWMi/xaQ11UDReijaaLqcFqyCne+Y11yJkDPVdDUdcKbpZSTtCAD4A5p9ZXVsOlzN2gNV\nrDlQyfYjNQSsq/ypuSnMnz6QWSMypFpHHEMCgAgd7gTTdTT3nKNpNYWmV9GBT829B7veN+kON2RN\nhtFXwagrzfhFvYTPH2BDQTUf7yrlk11lbC6spslrxn+KddqZkJPMHTPzmDY4lfHZSTJrljghaQMQ\n4aW+DA6uNMFg9wfWiKhOGDzLVBcNmA59xkbU3cnVjV7WHzJX9msOVLD+YBX1zX6UgrFZSUzKSWZU\nvwRG9ktgaGa81OMLaQQWvYDWpspow0JTMqjYa9IdbtN+kJVvAsKA6WFTQvD4/Gw4VM3Ggiq2Ftaw\nubCaXSV1aA02BcP7JDBpQDJTclM4a0gaybFSDSa+SgKA6H1qiuDgCihYbZaiDeD3AMqMc5Qx0syd\nkDHSNConZgW1+2lDs489JfXsKqllV0kdG6yrfI/PVOdkJrgY2TeBCTnJTBqQzLjsJOJcUmsr2tdl\nAUAp5QaWAy5Mm8EirfX9SqlcYCGQAqwFvqm1blZKuYAXgUlAOfB1rfV+a1v3AbcCfuBOrfV7J/ts\nCQDitPg8JhDs/wQKvoDSHVB96Ojr0clHg0L6cFNq6DMGnF3fUNrk9bPuYBVrD1ay7mAl24/UUlDZ\n2Pq6w6bIy4znjEEpTBuUysQByaTFyY1XonO6shuoB5ipta5TSkUBnyil3gF+BPxOa71QKfU05sT+\nlPVYqbUeopSaBzwKfF0pNRKYB4wC+gEfKqWGaq39ndpDIdrjcFndSM88muaphZJtcGSj6W1UugM2\nv2bmTAAz73LmKDP89ZBZpteRo3MnYo/Pz4o95by1vpD3thyhodn81AenxzIhJ5lr87PJy4gjLzOe\nAakxREmXTNHD2g0A2hQR6qynUdaigZnA9Vb6C8ADmABwufU3wCLgj8oMAn45sFBr7QH2KaV2A1OA\nlV2xI0J0iCsesqeYpYXWZgKdwvWmTeHACvjsKVjxuJlWc9hFZsTTrMlm6s0TVBsFApqtRTX8e2cp\nK/aUseZAJU3eAAluB5eN68fsUZlMzEkmKUbq7UVo6FCFolLKDqwBhgBPAnuAKq21z1qlAOhv/d0f\nOASgtfYppaqBVCv9szabbfseIYJHKTNQXUI/M7gdmNnU9n9sxjPa/i/Y+LJJj0421UV9xkCfMZTE\nj+Tj6gxW7qvk3ztLKa31AGYilHmTczhrSBpnD02TaQ9FSOpQALCqacYrpZKA14ERx1vNejze5ZE+\nSfoxlFK3AbcB5OTkdCR7QnQ9V9zRu5bn/h7f/pWU7F5DXcEWnGXbyTz0N6J1IxnA+TqGdDWcKeln\nEnvu5UwZN1YGThNh4ZS6FGitq5RSHwFnAElKKYdVC
sgCCq3VCoBsoEAp5QASgYo26S3avqftZzwD\nPAOmEfiU9kaILuTx+Vm+s4y3Nxby4dYG6puHAkNJiokiLz2GyYlVTHXuY6x/K2eXfoEqfQI+fAK2\njDcjnw6dDX0ngE3q9kVoajcAKKXSAa918o8Gzsc07C4Drsb0BJoPvGm95S3r+Urr9aVaa62Uegv4\nu1Lqt5hG4Dzgiy7eHyE6RWtNSa2HLYXVrD9UzRf7yll/qIomb4CkmCjmju3H9CGpTMhOJjsl+vhz\n25btPlpl9O9H4d+PmDaDYRfB8EvNNJpR0T2/c0KcQEe6gY7FNPLaARvwitb6QaXUII52A10HfENr\n7bG6jf4NmIC58p+ntd5rbetnwC2AD/iB1vqdk322dAMV3aHe42PlnnI+3VPGwfIGimubOFzZSKU1\n8YlNwch+CUwZmMo5Q9M4c0jaqffQqS83I5/uWGwGuWuuA5sDMkaY7qYpgyAxG5JyzHhIEhhEF5Ib\nwYQAvP4Amw9Xs/5QFTuO1LL9SC1bCqvx+jXRUXZy02LJTHDRJ9HNsMx4RvVPZETfhK694crngX3L\nTe+iovXmBrWG8qOv253Qb6IZ7K7fROg3HpIGyBwJotNkOGjRKx2pbmLdwUo2HjbDKaw9UEWj1/S/\nT4l1MixXZSAsAAAeQ0lEQVQznlvOyuXcvHQmDUzumd45DpeZ/CbvgqNpnjqoOQzle6yxjVbAyj9B\nwJRCcCeZnkZ9x8HgmWZIbBn9VHQxKQGIsKa1ZvPhGj7cVsyH24rZUlgDmDtrh/WJJ39AMlNyU8kf\nmExGvOv4dfehwucxcyQUrjs6JHbxZvA1meGwh8w09yL0m2ACg1MmYhfHJyUAEbHK6zx8sa+Cj3eX\nsXRbCUdqmlAKJuUkc+9Fw5k2KJVhfeLDbxhkh8uc3PtNOJrmbYS9H5lZ1PYshS2vm3Rlhz6jIXuq\nNZNavmlXCOUAJ0KOlABEyCuv87BqfyWf7S1n5Z5ydhTXAmbs+7Pz0pk1IoOZwzNI7Q1j59QWm3aE\ngtVw6HPz6DWzfRGdbJUOxpuSQu455n4G0etICUCEpZZG21X7K1hzoJLNh2s4XGUGTXNH2cgfkMJl\n4/txxqBUxmYl9r7xc+IzIX4ODJ1jnvt9ULrdmlpztQkOKx6HgM80Lg8409yTMPwSSMo++bZFryMl\nABE0Pn+gdRjkzYXVbCmsYVtRTevsVgNSYxiblcSY/mZI5HFZSTgdveyE3xneJjP66a73Yef7ULbD\npPcdB0MvkhvUegHpBipCktcfYOn2EhatKeCTXWWtPXTi3Q5G9jWzWuUPSGFybjIZ8e4g5zZClO+B\nbf80N6gVrAI0RKeYgNBnjBnxdMj50ssogkgAECFlf1k9L31xkEVrCiivbyY93sVFo/swMSeZ8dlJ\nDEiNCe0eOpGivtw0Ju/7txkSu2Qb+JtNQBh9FeTNMQ3KMSnBzqk4DRIARNAV1zTx/tZi3tlUxIo9\n5dhtivNHZHBtfjbnDk3H0dvq70ORr9n0Mtq40JQQfE0mPWWQuf8gb7a5B6EbJskR3UcCgOhRWmsO\nVjTwxb4KNhRUse5gVWuf/IGpMXxtYhbXTs4mM0GqdUKWp87cg3B4NRz83JQSvA1mjuVBM0xj8pBZ\nZggLKa2FNOkFJLpdvcfHRztKWbK9mJV7yimqNlePcS4HY7MS+fEFQ5kzug95GXFSvRMOXHFmwLrc\ns81znwcOfAo73oWd78DOd016fF8zoc7gmSYoxPcJXp7FaZESgOgwrTX7yupZvrOU5bvK+GR3Gc2+\nAMkxUUwfnMYZg1KYOiiVIelx2Gxywo8oWpv2ggOfmvsPDqyEmgLzWv9JMHYejLla2g5ChFQBiS5R\n3eDls33l/HtnKct3lrZOZD4gNYZZwzOZPSqT/AHJUp/f22hthq3YsRi2vmmGrbA7TZvB0AvNuEdS\nMggaCQCiw3z+AOX1zRTXmGGRCyob2Vdez9oDleworkVrc9ft9CFpnDM0nXPz0slJlUZB0UbRRli/\nwHQ3rTls0vpNMDegDb8UMoYHN3+9jASAXkprTWWDl8KqRmqavNQ0+qhsaKas1kN5fTM1TV7qPT7q\nPD7K65opq/NQUd9M4Es/gwS3g3HZSUwZaKp1JuQk9b67bsWp0xqKt8Cu92D7YtOgDNBnLEz4Boy5\nRqqJeoAEgF5Aa82WwhpW769ga1EN24/Usq+sntom33HXj3c7SHBHEeuyE+dykBrnIi3ORXqck8xE\nNxnxbvoluclKjiExOqqH90ZEpJpCUypYv8DMg2B3wqirYPK3zP0G0jmgW0gAiGBNXj9vbSjkbysP\nsOlwNWDGuh/ZN4FB6bEMSI2lf5KbxGgnCdEOkmKcpMY6w290TBFZjmyCNS/AhoXQXGvuQL7kfyFz\nVLBzFnEkAESgw1WN/G3lARauOkhVg5e8jDi+OW0As0f2ITMhxMe6F6KFpxbWvwQfPQyeGpj2PTj3\nHpnfoAvJfQARwucPsGxHKS+vOsjS7SUAzB7Zh/nTB3LGoBQ56Yvw44qHqbeZbqMf/Dd8+gfY+Cpc\n8KBJk990j5ESQIgKBDRvrD/MY+/toKi6ifR4F1dPyuKGqTlkJUsPHBFBDn4O7/zUDGXdPx9GXGom\nuuk3AaLkzvHOkCqgMLaxoIr/fnML6w9VMS4rke+eN4SZwzOkF46IXIGAaSj+9PdQvtukOaLNEBTD\nLjSjlSZmBTOHYUWqgMLUy6sO8vM3NpMU4+Q314zjqgn95a5aEflsNpj4TbPUlZq7jff9++gwFACJ\nOTBgOgy7yNxwJgPUnTYpAYQInz/AQ//axvMr9nN2Xhp/vG4iiTHSFVP0ci13HO/7GA6ugP2fQEM5\nRMXC8Ith0s0mKEi7wTGkBBBGDlU08KNX1rNqfyW3npXLfRcNl6EVhABzYs8cZZYzvg0BvwkCW/4B\nW16HTa9CxiiYfCuMvdY0MIsOkxJAEGltGnr/+40tAPzyitFcMaF/kHMlRJhoboDNi+CLv5jJbaJi\nTS+iMddAzhlg770laGkEDnH1Hh//9cZm/rHuMFMGpvC/144jO0XqNIU4ZVrD4bWw+jnY/Br4GsGV\nAIPOhT7jIH0Y9BsPSTnBzmmP6bIqIKVUNvAi0AcIAM9orf+glEoBXgYGAvuBa7XWlcp0TP8DcDHQ\nANyktV5rbWs+8HNr0w9prV841R2LBNuKavje39eyv6yeH54/lO/PHIJdGnqF6BylIGuSWS56BPb+\nG3a9b2Y62/bPo+sNnmmGoMibA3ap/YYOlACUUn2BvlrrtUqpeGANcAVwE1ChtX5EKXUvkKy1vkcp\ndTFwByYATAX+oLWeagWM1UA+oK3tTNJaV57osyOtBOAPaJ79ZC+/eX8nSdFR/GHeBKYNTg12toSI\nXJ46KNsJuz+ENc+bkUqd8aZEkDUZBp5phqSIsLuQu60KSCn1JvBHa5mhtS6ygsRHWuthSqk/W3+/\nZK2/A5jR
smitb7fSj1nveCIpABwor+cnr25g1f5KZo/M5NdXjSEtzhXsbAnRe/h9ZlazvcugYDUU\nb4aAD2xRZmC6zNGmuihjJPQdG9YNyt3SC0gpNRCYAHwOZGqtiwCsIJBhrdYfONTmbQVW2onSv/wZ\ntwG3AeTkREad3bubi7j71Y2g4LfXjuPKCf1lCAcheprdASPmmgVMI/LBlaaq6OBK2PiyGZsIAAVp\neZA2FJIHQtIAExwyR0FsWpB2oOt1OAAopeKA14AfaK1rTnICO94L+iTpxyZo/QzwDJgSQEfzF4q8\n/gD/8+52/vLxPsZlJfLkDRNlGAchQoUzxkxyP2SWea61Gb66eAsUrjNL2S7YvcQ0LLeI7wdDZpq2\nhNyzITo5OPnvAh0KAEqpKMzJf4HW+h9WcrFSqm+bKqASK70AyG7z9iyg0Eqf8aX0jzqf9dC240gt\nP351PZsP1/DNMwbw87kjcDlkOGYhQpZSkNjfLENnH03XGmqPQOk2KN4KBatg6z9h3f+Z15NyzIQ3\naXmQMsgsGSPDYuKbjvQCUsCzwDat9W/bvPQWMB94xHp8s03695VSCzGNwNVWkHgP+LVSqiVczgbu\n65rdCB3+gObpf+/h9x/uJMEdxZ9umMjFY/oGO1tCiM5SChL6mmXwTJPm98KhL8yQFUc2mWXnu6ZN\noUVcH1NtlDzABIn0EaatIYTmSu5ICeBM4JvAJqXUeivtPzEn/leUUrcCB4FrrNcWY3oA7cZ0A70Z\nQGtdoZT6JbDKWu9BrXVFl+xFiCiuaeKuhev4bG8Fl4zpy4OXjyJVGnqFiDz2KNODaOCZR9P8Pqgp\nMIPZlWwzVUllO814RvUlR9dL6G/aEtKHm5JC/4mQmmfGQ+phciNYF1m+s5QfvryehmY/v7xiNFdP\nkpELhRCW5no4stnMkXx4LZRuN8HB32xedyVA33EmIGSONCWG2AyITYfoJHCc2oWkjAXUgxZ8foD/\nemMzeRnxPHnDBIZkhG/3MSFEN3DGQs5Us7Tw+0xp4fAaExiKNpp2BW/9V9/viAZ3AjjjzLYcbrDZ\nweYAZTN/K/spD4onAeA0aK35zfs7eHLZHs4bls4fr59IrEu+UiFEB9gdkDHcLBNuMGmBAFQfNL2R\n6kqgvhSaqo8uzXXm5ja/xwyMF/CDbrYe/aecBTlbnYZfvr2N5z7dx3VTsvnl5aNlBE8hxOmx2cx9\nB8kDT287t3esJCABoJNeW1PAc5/u46bpA7n/0pFyY5cQIuzIJWsnbD5czX++volpg1L5+SUj5OQv\nhAhLEgBOUWV9M7f/bQ2psU7+eP0EqfYRQoQtqQI6Rb/45xZKaptY9O3p0sdfCBHW5PL1FCzdXswb\n6wv57owhjMtOCnZ2hBDitEgA6KDaJi8/e30zwzLj+d55Q4KdHSGEOG1SBdRBj7yzneKaJp76xiSc\nDombQojwJ2eyDli2o4QFnx/k1rNyGS9VP0KICCEBoB0ltU385JUNDO8Tz49nDwt2doQQostIFdBJ\nBAKaH728gfpmHwuvOwN3lIznL4SIHFICOIlnPt7LJ7vLuP/SUeRlygBvQojIIgHgBHYV1/Lb93dy\n4ag+zJuc3f4bhBAizEgAOA5/QHP3oo3Euuw8dOVoGepBCBGRpA3gOP766T7WH6riD/PGkyZ3+woh\nIpSUAL5kf1k9j723g/NHZHDZuH7Bzo4QQnQbCQBtmKqfDTgdNh66YoxU/QghIppUAbXx10/3sWp/\nJf97zTj6JLqDnR0hhOhWUgKw7C6p5X/e28EFIzO5amL/YGdHCCG6nQQAwOcP8ONXNxLrtPPrK6Xq\nRwjRO/T6KiCtNT97fTMbDlXx5PUTSY+XXj9CiN6h15cAfvfhLl5efYg7Z+Vxydi+wc6OEEL0mF4d\nAP7++UEeX7KLa/Oz+OH5ecHOjhBC9Kh2A4BS6jmlVIlSanObtBSl1AdKqV3WY7KVrpRSjyuldiul\nNiqlJrZ5z3xr/V1Kqfndszsd9+b6w/z8jU3MGJbOr6TeXwjRC3WkBPA8cOGX0u4Flmit84Al1nOA\ni4A8a7kNeApMwADuB6YCU4D7W4JGMLy7uYgfvbKByQNTeOqGSUTJxO5CiF6o3TOf1no5UPGl5MuB\nF6y/XwCuaJP+ojY+A5KUUn2BOcAHWusKrXUl8AFfDSo94qMdJdzx0jrGZSXy7E2TiXbKEM9CiN6p\ns5e+mVrrIgDrMcNK7w8carNegZV2ovQeVVTdyF0L1zMkI56/3jyFOFev7wQlhOjFurru43gV6fok\n6V/dgFK3KaVWK6VWl5aWdlnG/NbkLl5/gD/dMJHE6Kgu27YQQoSjzgaAYqtqB+uxxEovANoOnp8F\nFJ4k/Su01s9orfO11vnp6emdzN5XPbN8Lyv3lvPApaPITYvtsu0KIUS46mwAeAto6ckzH3izTfqN\nVm+gM4Bqq4roPWC2UirZavydbaX1iA2Hqvjf93dw8Zg+XJOf1VMfK4QQIa3dSnCl1EvADCBNKVWA\n6c3zCPCKUupW4CBwjbX6YuBiYDfQANwMoLWuUEr9Elhlrfeg1vrLDcvdorK+me8uWEtmgpuHrxwr\n3T2FEMLSbgDQWl93gpdmHWddDXzvBNt5DnjulHJ3mgIBzQ9fWU9prYdXvz2NxBip9xdCiBYR3QH+\nyWW7+WhHKf916UjGZScFOztCCBFSIjYArDlQwe8+3MkV4/vxjak5wc6OEEKEnIgMAI3Nfn7y6kb6\nJkbzkAzzIIQQxxWRd0I99t4O9pXV8/dvTZWbvYQQ4gQirgTw+d5y/rpiHzdOG8D0IWnBzo4QQoSs\niAoAJTVN/OiVDWQnx3DPhcODnR0hhAhpEVM/Uu/xccsLq6hsaOaV26cRK1U/QghxUhFRAvD5A3z/\n72vZWljDk9dPZHT/xGBnSQghQl5EXCY//M52lu0o5aErRnPe8Iz23yCEECL8SwDvbznCs5/sY/60\nAXzjjAHBzo4QQoSNsA4Ahyoa+MmrGxjTP5H/vGREsLMjhBBhJWwDQLMvwB0vrUNr+OP1E3A5ZGYv\nIYQ4FWHbBvDE0l2sP1TFn26YyIBUGd9fCCFOVViWADYcquJPH+3hqon9uXhM32BnRwghwlLYBYAm\nr58fv7qB9DgX9186KtjZEUKIsBV2VUC//WAnu0vqeOGWKTKvrxBCnIawKgF8vKuUv3y8l+um5HDu\n0K6bL1gIIXqjsAkAJTVN/GDheoakx/Ffc6XLpxBCnK6wqALyBzR3LVxPQ7OfhbdNJMYZFtkWQoiQ\nFhZn0j8u3c3KveU8dvVY8jLjg50dIYSICCFfBbT9SA1PLN3F5eP7cU1+drCzI4QQESPkA8C9r20i\nITpKunwKIUQXC+kqoLI6D2WHqvjDvPGkxDqDnR0hhIgoIV0CKK7xcO7QdC4b1y/YWRFCiIgT0gEA\n4FdXjkYpFexsCCFExOnxAKCUulAptUMptVspde/J1k1P9HCwcT0VT
RUEdIDa5lqK6oooqC2gqK6I\n8sZytNZfeZ/WGq/fS11z3XFfP5kmXxNljWVUe6pp8DYQ0IFT20EhhAgT6lRPkKf1YUrZgZ3ABUAB\nsAq4Tmu99XjrR+dG6yEPDDHvRaH5al7TotMYkzaGjJgM9lfvZ0/1HiqbKvFrPwAuu4t+cf1Ij07H\nr/14A16ibFGkR6eTFp1Go6+RIw1HKK4vprSxlGpP9THbtys7Sa4kUqJTUCi8AS82bIxIHcG49HEM\nThqMw+bAruzERsWS6Eok0ZWIQhHQAfzajz/gx6/92JUdt8ONw/bVphd/wOTNruzYbXZsKuQLZ0KI\nEKWUWqO1zm93vR4OANOAB7TWc6zn9wForR8+3voTJk3QT7/9NNsrtlPTXEOCM4F4Zzw2ZSOgA9R7\n69lWvo1NZZsobywnNzGXQUmDSI9OJ9oRTZQtirLGMgrrCyltKMVhc+C0O/H4PZQ1llHSUEK0I5rM\nmEz6xPYhIyaDjJgMEpwJ+LWfZn8ztc21VDRVUNFUAUCULYpmfzObyzdT1ljWqe/BoRw4bA5syoZS\nCo/fgy/gO2adRFciAxIGMDBhIGnRaSS7kol1xuIPmHwppYiNiiUmKoZUdypp0WmkRafhtDtNEFH2\nk1adaa1bX9da4w14afI3EQiYEk+AAI2+Ruq99TT6Gmn2N+MNeEFDlD2KKFsUvoCPJn8THr8HG7bW\nwGVTNhzKgU/7aPKZ11t+ZzZlI84ZR4IzAZfdhVIKrXXr9+0L+Ih2RBMTFUOULYoGXwMN3gZ8AV/r\n9h3K0Rog67x11DTX4A/4iXfGk+BMwG6ztwbeliAc0AHsyry/5TtLciWZkqW3lvrmerzaiz/gb92v\nJl8TAR0wn2lztObLZXfh8Xvw+Dz4tR+bsmFX9tZ98Aa8NPmaaPI34Q/4cdlduB1uXHYXUbYonHZn\n67acdic1nhqqPdU0B5qJccS07jtAQAeo8lRR3lhOnbeOKJv57l0OF7EOc/wBmv3N+LUfh81BlC0K\nhWrdB2/Ai1/70Vpjt9lbt6Ewx9+nfTT6GmnyNeGwOXDb3TjtTrwBL42+Rjx+D16/l+ZAMzZlw213\n43a4AVovbtp+X26HG7fd3XphpJSi3ltPbXMtNc01VDVVUeWposFnStgBHSDBmUBGTAZp0WmkRqeS\n4k4hxhFDrbeW2uZa/Nrfmu+WY+8L+KjyVJnvzt9sXrdHtX7f0fZooqOiiY2Kxa7sVHuqqfRUYsNG\nkiuJRHdi6/cMYMOGzWbDhq31fyPKFtV67Fp+p96AlwZvA/XeemzKRrwznjhnHHZlxxfw4dd+PD5P\n6/F3O8z3pbVu/V0o1Fc+qy233U2CKwG33d2pKvCOBoCe7gXUHzjU5nkBMPVEK9uVnal9pzK17wlX\nCRqtNYfrDlNQV0AgEMCnfdR761t/kEDrCaflZB/QgWNODD7tQ2uNy+7C5TAnh4AO4Av4KGssY3/N\nfj4r+oyKpoqvBIiOUKjWPLScPFtKGi0lJLuyo9G9sqrrRKVK0b3cdjeJrkRiomJaf59bPVspayxr\n/V0Ko+X7AfN7jbJH4bQ5W88pX77Qa0nrqJ4OAMcLZcf8ByqlbgNuA8jJyemJPHWKUoqs+Cyy4rO6\n/bO01jT4Gqhtrm29ytFa0+BtoM5bR0VTBSUNJZQ3luPTvtarspYrX7/2EwgEjrlCdNgcrVdfANGO\naFx2F3bb0R9Py9Voy2tOu+mK23I12PZqUaNbP6PlMx02h7kaa3MV4w/4qfPWUdtcS5O/qfWzHMpB\nlD0Ku7LT5Gui3leP1+8lNirWXMHZ7K2BtmX7APFR8cQ747Hb7NQ2W1eLAX9rwGv5B7IpW2upoMHX\nQHljORVNFTjtTnMFFxXX+k/lsDmItkfjcrhar+z9AX9ricjj97ReZba83lLCaPkHdTlcrVfBLVfH\nLaWDZn8zDT5zBdnsbybBmUCyOxmnzUm9r556bz3+gL/1d5boSiTVnUq8Mx5fwNd6Zd72KjTKHmVK\nXdbrba/EW75Xm7K1vu4NeI/57t0ONy6Hi0Ag0Fqqazl2TrsTp91JlM387lr2RynV+h23/EZsytb6\nui/ga/2NxUbFEu80x6ql9PBlAR2gsqmS8qZyyhvL8fg9xxyblu+vJWjblZ1EVyJJriRcdlfrd+vx\ne/D4Pcd8R37tN1f9rkQ0urUU0vI70loTIEAgECBAoDWttZTr87Tur8PmML9LRywa3VqygaMn4Jbv\n3mazmdKA9X25He7W/6OWEsXx/t+b/E3UeGqo8x5txwwQwOs3x66lpBHQgdbXWy7kAjrA27zdoXNL\nSFcB5efn69WrV/dY/oQQIhJ0tAqop1saVwF5SqlcpZQTmAe81cN5EEIIQQ9XAWmtfUqp7wPvAXbg\nOa31lp7MgxBCCKPHh4LQWi8GFvf05wohhDiWdDYXQoheSgKAEEL0UhIAhBCil5IAIIQQvZQEACGE\n6KV69EawU6WUqgV2BDsf3SQN6NxgQqEtUvcLInffInW/IHL3rb39GqC1Tm9vIyE9IxiwoyN3s4Uj\npdTqSNy3SN0viNx9i9T9gsjdt67aL6kCEkKIXkoCgBBC9FKhHgCeCXYGulGk7luk7hdE7r5F6n5B\n5O5bl+xXSDcCCyGE6D6hXgIQQgjRTUI2AJzK5PGhTCmVrZRappTappTaopS6y0pPUUp9oJTaZT0m\nBzuvnaGUsiul1iml3rae5yqlPrf262Vr2O+wo5RKUkotUkptt47dtEg4ZkqpH1q/w81KqZeUUu5w\nPWZKqeeUUiVKqc1t0o57jJTxuHU+2aiUmhi8nLfvBPv2mPV73KiUel0pldTmtfusfduhlJrT0c8J\nyQBgTR7/JHARMBK4Tik1Mri56jQf8GOt9QjgDOB71r7cCyzRWucBS6zn4eguYFub548Cv7P2qxK4\nNSi5On1/AN7VWg8HxmH2MayPmVKqP3AnkK+1Ho0Zkn0e4XvMngcu/FLaiY7RRUCetdwGPNVDeeys\n5/nqvn0AjNZajwV2AvcBWOeTecAo6z1/ss6h7QrJAABMAXZrrfdqrZuBhcDlQc5Tp2iti7TWa62/\nazEnkv6Y/XnBWu0F4Irg5LDzlFJZwCXA/7OeK2AmsMhaJVz3KwE4B3gWQGvdrLWuIgKOGeben2il\nlAOIAYoI02OmtV4OVHwp+UTH6HLgRW18BiQppfr2TE5P3fH2TWv9vta6ZXLwz4CW+WgvBxZqrT1a\n633Absw5tF2hGgCON3l8/yDlpcsopQYCE4DPgUytdRGYIAFkBC9nnfZ74KdAy4zyqUBVmx9puB63\nQUAp8Fereuv/KaViCfNjprU+DPwGOIg58VcDa4iMY9biRMco0s4ptwDvWH93et9CNQC0O3l8uFFK\nxQGvAT/QWtcEOz+nSyk1FyjRWq9pm3ycVcPxuDmAicBTWusJQD1hVt1zPFZ9+OVALtAPiMVUjXxZ\nOB6z9kTKbxOl1M8wVcsL
WpKOs1qH9i1UA0ABkN3meRZQGKS8nDalVBTm5L9Aa/0PK7m4pQhqPZYE\nK3+ddCZwmVJqP6aKbiamRJBkVS9A+B63AqBAa/259XwRJiCE+zE7H9intS7VWnuBfwDTiYxj1uJE\nxygizilKqfnAXOAGfbQPf6f3LVQDQMRMHm/Viz8LbNNa/7bNS28B862/5wNv9nTeTofW+j6tdZbW\neiDm+CzVWt8ALAOutlYLu/0C0FofAQ4ppYZZSbOArYT5McNU/ZyhlIqxfpct+xX2x6yNEx2jt4Ab\nrd5AZwDVLVVF4UIpdSFwD3CZ1rqhzUtvAfOUUi6lVC6mofuLDm1Uax2SC3AxpqV7D/CzYOfnNPbj\nLExxbCOw3louxtSXLwF2WY8pwc7raezjDOBt6+9B1o9vN/Aq4Ap2/jq5T+OB1dZxewNIjoRjBvwC\n2A5sBv4GuML1mAEvYdoyvJir4FtPdIww1SRPWueTTZieUEHfh1Pct92Yuv6W88jTbdb/mbVvO4CL\nOvo5ciewEEL0UqFaBSSEEKKbSQAQQoheSgKAEEL0UhIAhBCil5IAIIQQvZQEACGE6KUkAAghRC8l\nAUAIIXqp/w8TtkNp4lrx8wAAAABJRU5ErkJggg==\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10a019eb8>"
+ "<matplotlib.figure.Figure at 0x119756320>"
]
},
"metadata": {},
@@ -294,14 +278,12 @@
{
"cell_type": "code",
"execution_count": 8,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "<matplotlib.axes._subplots.AxesSubplot at 0x10a4ddc50>"
+ "<matplotlib.axes._subplots.AxesSubplot at 0x11c105a20>"
]
},
"execution_count": 8,
@@ -310,9 +292,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAEACAYAAABWLgY0AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd4FOX2wPHvSQIEkkCAUAKCoPSiVEVBQaQqHWkKEkS8\n2BC91yt4LXgVLIgKiv6ujQ5SlV5CCYpKEQmioSpBuiCEAKGknN8fs4HdEFI3md3N+3meeTIzO7Nz\n9s3mzOTMzDuiqhiGYRi+yc/uAAzDMIy8Y5K8YRiGDzNJ3jAMw4eZJG8YhuHDTJI3DMPwYSbJG4Zh\n+LBMk7yIjBSR30Rkh4jMFJEiIlJKRCJFZI+IrBKR0DTL7xWRXSLSzml+Y8d77BWR8Xn1gQzDMIyr\nMkzyIlIFGAI0UtX6gD/QFxgBRKpqDWCNYxoRqQP0AeoAHYCPRUQcb/cJMFhVqwPVRaSD2z+NYRiG\n4SKzI/l4IBEoJiIBQDHgCNAFmOJYZgrQzTHeFZilqomqGgvsA24XkXAgRFU3O5ab6rSOYRiGkUcy\nTPKqegoYB/yJldzjVDUSKKeqxx2LHQfKOcYrAIec3uIQUDGd+Ycd8w3DMIw8lFm55mZgOFAFK1EH\ni0h/52XU6hfB9I1gGIbhgQIyeb0J8IOq/g0gIguAO4BjIlJeVY85SjF/OZY/DFRyWv8GrCP4w45x\n5/mH09ugiJgdhmEYRjapqqQ3P7Mkvwt4WUSKAheBNsBm4DwwEHjb8fMbx/KLgJki8h5WOaY6sFlV\nVUTiReR2x/oDgAkZBJvVz+XzRo0axahRo+wOw2OY9nBl2uNadrVJfDzs3Qt79lg/nYfERKheHapV\ns4YKFaB0aWsoVerqeHAwSLqpOmOSwUoZJnlV3S4iU4GfgBTgZ+BTIASYIyKDgVigt2P5GBGZA8QA\nScATejVjPwFMBooCy1R1RfY/SsETGxtrdwgexbSHK9Me18rrNomLg+ho2LYNfv31alI/e9ZK5DVq\nWD/vvReGDrXGy5TJWfJ2h8yO5FHVd4B30sw+hXVUn97yY4Ax6czfCtTPQYyGYRj5ThWOHr2a0FOH\n48fhllugYUNo2hT697cSe4UK9iXyjGSa5A17RURE2B2CRzHt4cq0x7Vy0iaXL8POnbB9u+sAcOut\n0KgR9OwJb7xhHZn7+7s35rwknlb/FhH1tJgMw/ANFy9a5ZWYGGvYudP6+ccfULWqldCdh/Bwzzw6\nT0tErnvi1SR5DxcVFUWrVq3sDsNjmPZwZdrjWlFRUTRs2IqdO60kvmsXV8YPHYKbboLataFOnatD\njRoQGGh35DmXUZI35RrDMLyWKsTGwk8/wdat8PPP1s9Ll6BWLSuZ16oFERHWeLVqUKiQ3VHnL3Mk\nbxiG1zh+HH74ATZtuprUixaFxo2vDvXrww03gF8B6mPXlGsMw/A6yclWvfyHH+D7762fp07BHXfA\n7bdfTerly9sdqf0ySvIeua9L0RS7Q/AYUVFRdofgUUx7uPK19khKguXL4cEHrZuDevaEH3+Eu+6C\nRYvg5ElYuhReeQXuvz/9BO9rbZJbHlmTf2b5M0zoOCHDu7gMw/ANqlbZZfp0mDULqlSBAQPgvffM\nUbo7eGS5ptH/GtHupna82eZNu8MxDCOPHDxoJfZp06xLG/v3v3pjkZE9Xnd1zcr+K2k5uSUhRUJ4\n8a4X7Q7HMAw3OXcOFiyAKVOsu0cfeAA+/RSaN/eO69G9kUfW5MOKhbF6wGomRU9i/MaC/aRAU190\nZdrDlTe0R3IyrF4NDz9sXfUyd67Vp8uRI1aCb9HCvQneG9okP3nkkTxAeEg4qwes5u7JdxNUOIhH\nGz1qd0iGYWRRap191iz46isoV85K8u++C2XL2h1dweKRNXnnmPb+vZdWU1oxtu1YHqz/oI2RGYaR\nmZ07ryb2lBTo1w/69oW6de2OzLd5XU3eWfXS1VnZfyVtprYhMCCQHrV72B2SYRhO/v4bJk2yTqKe\nOAF9+sCMGdCkiamzewKPrMmnVa9sPZY9tIzHlz7O0j1L7Q4nX5n6oivTHq7sbI/t2+HRR62uAn79\nFT74AP7807r0sWlT+xK8+Y648ookD9AovBGL+i5i0MJBRP4eaXc4hlEgJSXB/PnQqpV1M1LVqrB7\nN0yebM3zpi54CwqPr8mnteHPDXSf3Z15vebRskrLfIzMMAquM2esK2E++ggqVYJhw6B794LX2Zen\n8rpuDTLSonILZj8wmwfmPsCPB3+0OxzD8GlHj8KIEVb3vNHR1jXuGzZA794mwXsLr0vyAK2rtmZa\n92l0/aorPx35ye5w8pSpL7oy7eEqr9pj7174xz+sq2LOn7d6fJwxw+oQzNOZ74irTJO8iNQUkW1O\nwxkRGSYipUQkUkT2iMgqEQl1WmekiOwVkV0i0s5pfmMR2eF4LVd3OXWo1oHPOn9Gp5md2H5se27e\nyjAMrGvbv/8eevWCO++0+o3ZvRs+/NDqT8bwTtmqyYuIH3AYuA14Gjipqu+IyAtASVUdISJ1gJlA\nU6AisBqorqoqIpuBp1R1s4gsAyao6oo028hWV8Nzf5vLMyueYd3AddQMq5nl9QzDsBw4AFOnWkNA\ngHUE/+ijEBxsd2RGVrmzJt8G2KeqB4EuwBTH/ClAN8d4V2CWqiaqaiywD7hdRMKBEFXd7FhuqtM6\nOdarbi9Gtx5Nu+ntOBB3ILdvZxgFwrlzVv8xrVtbJZhjx6xyTEwMDB9uErwvyW6S7wvMcoyXU9Xj\njvHjQDnHeAXgkNM6h7CO6NPOP+yYn2uDGg7iuWbP0WZaG46dO+aOt/QYpr7oyrSHq+y2x7ZtMGSI\ndYXM/Pnw5JNw+DBMnAi33eYbNy+Z74irLN/xKiKFgc7AC2lfc5RibL0W85lmz3Dm0hnaTWtHVEQU\npYqWsjMcw/AYly5ZnYJNnGgl9KFDre4HTF/tBUN2ujXoCGxV1ROO6eMiUl5VjzlKMX855h8GKjmt\ndwPWEfxhx7jz/MPpbSgiIoIqjjM9oaGhNGjQ4MoT6VP30ulNv3z3y/y66VdavNKCTW9sIqRISIbL\ne8N06jxPicfu6dR5nhKP3dOp89J7/cABGDkyiuXLoWnTVrzwAoSEROHvD+XLe0b8eTXt3DaeEE9e\nfL6oqChiY2PJTJZPvIrIV8ByVZ3imH4H+FtV3xaREUBomhOvt3H1xGs1x9H+JmAYsBlYihtOvKal\nqjy2+DF+P/07yx5aRmBAYI7fyzC80ebNMHYsrF1rPWHp8cehprkmwafl+sSriARhnXRd4DT7LaCt\niOwBWjumUdUYYA4QAywHnnDK2k8AnwN7sU7guiR4dxAR/q/T/1E2qCy95/YmMTnR3ZvIV2mPTAo6\n0x6uUtsjJQWWLIGWLa1LIJs3h9hYqz+ZgpbgzXfEVZbKNap6HghLM+8UVuJPb/kxwJh05m8F6mc/\nzOzx9/NnWvdpdJ/dnSGLhzCp6yTzv
FjDJ12+bPUA+e67ULgwPP+8leTN3ahGKq/ruyY7zl8+T4tJ\nLXiw3oM83/x5t7ynYXiChASrL5mxY6FePSu533uvb1wdY2SfV/cnnxtBhYNY1HcRzb5oRq2wWnSu\n2dnukAwjV86dg48/trrzbd7cKtE0bGh3VIYn88q+a7KjUolKLOi9gMGLBrPj+A67w8k2U190VVDb\n48wZGD3a6ijs558hMtK6zv3MmSi7Q/M4BfU7cj0+n+QBbr/hdj7o8AFdvurCX+f/ynwFw/AQZ87A\nqFFw882waxesX289Wq9+np/ZMnyFT9fk03pp7UtExUax5uE1FAkokifbMAx3SEiwbl4aOxY6dICX\nX4bq1e2OyvBUPtWffG78957/UjaoLEOXDsXTdm6GAdbVMp98YiX0jRth3Tqr4zCT4I2cKlBJ3k/8\nmNZ9GtHHohn34zi7w8kSU1905avtkZwM06ZBrVqwcKE1zJ9v9eeeEV9tj9wwbeLKp6+uSY/zFTdV\nQ6vSs05Pu0MyCrjNm+GxxyAoyLrmvaV5qqXhRgWqJu9s29FtdJjRgc87f24urTRscfYsvPQSzJkD\n48ZBv37mOncjZ0xNPh0NwxuypN8SBi8azIp9bu9dwTAytGiRVYo5exZ+/RUefNAkeCNvFNgkD9C0\nYlO+6fsND3/9MGv+WGN3OOky9UVX3t4eR47AAw/AP/9pPbTjyy+hdOmcv5+3t0deMG3iqkAneYA7\nK93JvN7z6De/H+tj19sdjuGjUlKsq2ZuvdU6ufrLL3DPPXZHZRQEBbYmn9ba/WvpO68vX/f5muaV\nm+f79g3f9csv1nNT/fzgf/+z+poxDHcyNfksaF219ZWeKzcf3pz5CoaRifPn4YUXrI7DBg2C774z\nCd7IfybJO2lfrT1fdv2SzrM6s/PETrvDAUx9MS1vaY/ly62EfuiQdWL1scesI3l385b2yE+mTVyZ\nJJ9GpxqdGN16NL3m9uL85fN2h2N4mePHoU8feOopqzQzYwaUK5f5eoaRV0xNPh2qysPfPEwhv0J8\n2fVLW2MxvMf8+fDkk1Zp5pVXoGhRuyMyCoqMavImyV/HucvnaPpZU0Y0H8HABgPtDsfwYHFx8PTT\nsGmT1c9Ms2Z2R2QUNObEaw4EFw5mbq+5/CvyX/z212+2xWHqi648rT0iI61uf0NDYdu2/E/wntYe\nnsC0iausPsg7VETmichOEYkRkdtFpJSIRIrIHhFZJSKhTsuPFJG9IrJLRNo5zW8sIjscr43Piw/k\nTvXK1mNs27H0mtuLc5fP2R2O4UHOn7fq7oMHWzc0ffih1feMYXiaLJVrRGQKsF5VvxSRACAI+A9w\nUlXfEZEXgJKqOkJE6gAzgaZARWA1UF1VVUQ2A0+p6mYRWQZMUNUVabblEeUaZ4MWDiI5JZkp3aaY\nB4IbbN9uPSy7WTOYMME6ijcMO+WqXCMiJYC7VPVLAFVNUtUzQBdgimOxKUA3x3hXYJaqJqpqLLAP\nuF1EwoEQVU29CH2q0zoebeJ9E/n56M98uc2chC3oZs+GNm3gtdes+rtJ8Iany0q5pipwQkQmicjP\nIvKZiAQB5VT1uGOZ40DqhWIVgENO6x/COqJPO/+wY77HK1aoGHN6zWHEmhH8cvyXfN22qS+6sqs9\nkpPhxRdhxAirDt+vny1hXMN8P65l2sRVVvqTDwAaYZVZtojIB8AI5wUcpRi31VgiIiKoUqUKAKGh\noTRo0IBWrVoBV3+Bdky/1+497h9zPx91/IiuHbrmy/ajo6Nt+7yeOG1He5w7B5980oqEBHj//Sji\n4gAKbnt4+nR0dLRHxZMX06njsbGxZCbTmryIlAd+VNWqjukWwEjgJuAeVT3mKMWsU9VaIjICQFXf\nciy/AngVOOBYprZjfj+gpaoOTbM9j6vJO3txzYss27uMNQ+voXSxXHQfaHiFXbuga1frOavvvguF\nCtkdkWFcK1c1eVU9BhwUkRqOWW2A34DFQOoF5AOBbxzji4C+IlJYRKoC1YHNjveJd1yZI8AAp3W8\nxujWo+lQrQNtprXh1IVTdodj5KElS+Duu60SzfjxJsEb3imr18k/DcwQke3ALcBo4C2grYjsAVo7\nplHVGGAOEAMsB55wOjR/Avgc2AvsS3tljTcQEd68903a3tSWNlPzPtE7/3tm5E97qMLbb8PQodbD\nPQYNyvNN5pj5flzLtImrLD3jVVW3Y10SmVab6yw/BhiTzvytQP3sBOiJRIS327zN85HP025aOyIH\nRFKyaEm7wzLc4NIlq1vgHTusO1gresWlAYZxfaZbg1xQVf656p989+d3RA6IJDTQXE/nzU6cgO7d\noXx566lN5uYmw1uYbg3yiIgwrt04mldqTrtp7Thz8YzdIRk59OuvcNtt1tOa5swxCd7wHSbJ55KI\n8H7792l2QzPaT29P/KV4t76/qS+6yov2WLoUWreGN96A11/Pm37f84r5flzLtIkrL/o6ey4RYXyH\n8TQKb8R9M+4z/dx4CVX44AMYMgQWLoSHHrI7IsNwP1OTd6MUTeEfi//B3lN7WfbQMooVKmZ3SMZ1\npKTAP/9p3b26dCnceKPdERlGzpn+5PNRiqYwaOEgjpw9wqK+iyhayDw5wtNcugQDB8KRI9YRfElz\nYZTh5cyJ13zkJ3582eVLyhQrQ485PbiUdClX72fqi65y2x5nzkDHjpCUBKtWeX+CN9+Pa5k2cWWS\nfB7w9/NnavepBBUKotfcXlxOvmx3SAbWkfvdd0PdulZvkoGBdkdkGHnPlGvy0OXky/Sa24sAvwC+\n6vkVhfzNffF22bnTOoIfOhReeAHMYwEMX2Jq8ja6lHSJ7rO7U7xIcab3mE6AX5ZuMjbc6IcfoEcP\neOcdePhhu6MxDPczNXkbFQkowoI+Czh14RQR30SQnJKcrfVNfdFVdttj0SLo1s26g9UXE7z5flzL\ntIkrk+TzQWBAIN/0/Yaj544yeNFgUjTF7pAKhM8/t/qhWboU2re3OxrDsIcp1+Sj85fPc//M+6lW\nqhqfdv4UPzH72LygCqNHWw/YXrkSqle3OyLDyFumJu9Bzl0+R8cZHakTVodPOn1iEr2bJSfDsGHw\n/fewYoXV2Zhh+DpTk/cgwYWDWfbgMnb8tYOnlz1NZjs0U190lVF7XLwIfftaV9KsX18wErz5flzL\ntIkrk+RtEFIkhOUPLeenoz8xfMXwTBO9kbkzZ6xH9Pn5wfLlUKKE3REZhmcw5RobxV2Mo83UNjQK\nb8SHHT+kSEARu0PySidPQrt20Ly59Zg+b+pF0jDcwZRrPFRoYChrB67lRMIJWk1pxeH4w3aH5HWO\nHoVWrayj+AkTTII3jLTMn4TNihcpzvze8+lSowtNP2vK+tj1Lq+b+qIr5/Y4eBBatoR+/WDMmIJ5\nF6v5flzLtImrLCV5EYkVkV9EZJuIbHbMKyUikSKyR0RWiUio0/IjRWSviOwSkXZO8xuLyA7Ha+
Pd\n/3G8k5/4MfKukUzuNpne83rzwcYPTJ0+E3/8YfVD8/jj8J//2B2NYXiuLNXkRWQ/0FhVTznNewc4\nqarviMgLQElVHSEidYCZWA/+rgisBqqrqjp2EE+p6mYRWQZMUNUVabZVYGry6dl/ej895vSgdlht\nPuv8GUGFzXPo0tq1C9q2tZL70KF2R2MY9nNXTT7tG3QBpjjGpwDdHONdgVmqmqiqscA+4HYRCQdC\nVHWzY7mpTusYDlVLVuWHR36gkH8h7vjiDv44/YfdIXmUX36xHtU3erRJ8DklImbw4iG7sprkFVgt\nIj+JyBDHvHKqetwxfhwo5xivABxyWvcQ1hF92vmHHfONNIoWKsrkrpN5rPFjNHmxCT8c/MHukDzC\nd99Bq1ZRjB/vm/3Q5ERO68+qagYvHHIiq10iNlfVoyJSBogUkV1pvjAqIm6rsURERFClShUAQkND\nadCgAa1atQKufqkLwvRTtz3FLwt/oePojnz61Kf0qdfHo+LLr+nkZPjxx1ZMnAi9ekVTpgyA58Rn\n53R0dHSO1je8V+rvMCoqitjY2EyXz/Z18iLyKnAOGAK0UtVjjlLMOlWtJSIjAFT1LcfyK4BXgQOO\nZWo75vcDWqrq0DTvrzndY/mq7ce203lWZ4Y2GcrIFiNz9C+btzp6FPr3t7ormDEDKpr//XLNUb+1\nOwwjB673u8tVTV5EiolIiGM8CGgH7AAWAQMdiw0EvnGMLwL6ikhhEakKVAc2q+oxIF5EbhcrSw1w\nWsfIwK3lb2XjoxuZFzOPwYsGF5gnTa1aBY0bw113wZo1JsEbRk5kpSZfDvhORKKBTcASVV0FvAW0\nFZE9QGvHNKoaA8wBYoDlwBNOh+ZPAJ8De4F9mubKGuNaqf+aVQipwLeDvuVkwkk6TO/A6Qun7Q0s\nDyUmwsiR8MgjMHMmjBoF/v7Wa6bc4Mq0R/7y8/Pjjz+862KITJO8qu5X1QaOoZ6qvumYf0pV26hq\nDVVtp6pxTuuMUdVqqlpLVVc6zd+qqvUdrw3Lm4/ku4ILB/N1n6+5tdyt3Pnlnew/vd/ukNzu0CHr\nDtbt22HbNmvcKDiqVKlCsWLFCAkJoVSpUnTq1IlDhw5lvqKHWLJkCbfddhvBwcGEhYXRv39/Dh/O\n+p3srVq14osvvnBrTOaOVw/XKk2W8/fz5/0O7/Nk0ydpMakFW49stSewPLBuHTRtCp07w5IlOE6w\nukrbHgWdr7WHiLBkyRLOnj3L0aNHKVeuHE8//XSO3ispKcnN0WVs3rx5PPTQQzz33HP8/fff/Pbb\nbxQpUoQWLVoQFxeX+RtA3pxvs/uSoHQuEVIjaxbELNCwd8J02Z5ldoeSKykpquPGqZYvr7p6td3R\n+D5P/hurUqWKrlmz5sr00qVLtUaNGlemW7ZsqZ9//vmV6UmTJmmLFi2uTIuITpw4UatVq6Y33XST\nRkVFacWKFXXcuHFatmxZDQ8P10mTJl1Z/uLFi/rPf/5TK1eurOXKldOhQ4fqhQsXrrz+zjvvaHh4\nuFasWFG/+OILFRH9/fffr4k7JSVFK1eurGPHjr1mfr169fSVV15RVdVXX31V+/fvf+X1/fv3q4ho\nUlKSvvjii+rv76+BgYEaHBysTz/99DXbud7vzjE/3ZxqjuQ9XEY11+61u7Oo7yIGLRzEFz+791+8\n/HLunNX3zMyZsHEj3HtvxsubGrQrX2wPdZzCS0hIYPbs2dxxxx1XXsvKDUELFy5ky5YtxMTEoKoc\nP36c+Ph4jhw5whdffMGTTz7JmTNnABgxYgT79u1j+/bt7Nu3j8OHD/Pf//4XgBUrVjBu3DhWr17N\nnj17WL169XW3uXv3bg4ePEivXr1c5osIPXv2JDIyMsOYRYTRo0dz1113MXHiRM6ePcuECRMyXCer\nTJL3cndUuoNvB33LmA1jeHXdq151adzevdCsGQQFwYYNcOONdkdkgNXRmzuGnFBVunXrRsmSJQkN\nDWXNmjX861//ytZ7jBw5ktDQUIoUsbruLlSoEK+88gr+/v507NiR4OBgdu/ejary2Wef8d577xEa\nGkpwcDAjR47kq6++AmDOnDk88sgj1KlTh2LFivHaa69dd5snT54EIDw8/JrXypcvf+X1rHD337BJ\n8h4uKzXXGqVr8OPgH1m+bzmDFw0mMTkx7wPLpcWLrf7fhw2zHrgdGJi19XytBp1bedEequ4ZckJE\nWLhwIadPn+bSpUt8+OGHtGzZkr/++ivL71GpUiWX6dKlS+Pn1Ad1sWLFOHfuHCdOnCAhIYHGjRtT\nsmRJSpYsSceOHa8k5KNHj7q8V+XKla+7zbCwsCvrpHX06FHKpHeC6TrcXZc3Sd5HlA0qy7qB6ziR\ncILOszpz7vI5u0O6rg8+sHqPXLQIHnusYHYRbGROROjevTv+/v5s2LABgKCgIM6fP39lmWPHjqW7\nXlaEhYVRtGhRYmJiOH36NKdPnyYuLo74+HjAOir/888/ryzvPJ5WzZo1ueGGG5gzZ47L/JSUFObP\nn8+9jjpkUFAQCQkJ140/L068miTv4bJTcw0qHMTXfb6mYkhF2k9vT9zFrJ3Rzy8pKfD88/Dpp/DD\nD1apJrt8sQadG77YHqnlClW9clRfu3ZtABo0aMCCBQu4cOEC+/bty9Xlhn5+fgwZMoThw4dz4sQJ\nAA4fPsyqVasA6N27N5MnT2bnzp0kJCRkWK4REd59913eeOMNZs2axcWLFzl27BiPPvoo586d49ln\nnwWgYcOGfPvttxw8eJAzZ87w5ptvurxPuXLl+P3333P8mdL9nG59N8N2AX4BfNblM5pWaErrKa05\ncf6E3SEBcPmy1anYDz9Y9fcM/vM1CrjOnTsTEhJCiRIlePnll5k6deqVJP/ss89SuHBhypUrx6BB\ng+jfv7/L0W96R8IZHR2//fbbVKtWjWbNmlGiRAnatm3Lnj17AOjQoQPDhw+ndevW1KhRg3vvvTfD\n9+rduzfTpk3j/fffJywsjLp163Lp0iW+//57SpYsCUCbNm3o06cPt9xyC02bNqVz584u7/nMM88w\nb948SpUqxfDhw7PXcNdhnvHqo1SVV9a9wryd81g9YDUVi9vXJ0B8PPTsCcHB1lU0RYvaFoqB6bvG\nm+VJ3zWGdxIRXm/9OoMaDOKuSXfZ1i/9sWPWI/qqVYN580yCN4z8ZpK8h8ttzfXfzf/Nv5v/m5aT\nWxJzIsY9QWXRnj1w553WUfzHH1/tfyY3fLEGnRumPYzMZLU/ecOLDW0ylODCwdw79V6WPriURuGN\n8nybmzZBt27WE5weeSTPN2cYxnWYmnwB8s2ub/jHkn+wsO9Cmt2Qg0tbsmjJEiuxT5oE99+fZ5sx\ncsjU5L2XqckbGepWqxuTu06my6wufHfguzzZxuefw5AhVqI3Cd4w7GeSvIdzd821Y/WOzOw5k55z\nerJ2/1q3va8q/Pe/8OabsH493Hab297ahalBuzLtY
WTGJPkCqM1NbZjbay595/Vl5b6Vma+QiaQk\n+Mc/YOFC6zr4GjXcEKRhGG5havIF2A8Hf6DbV934ossXdK7ZOUfvkZBg9SJ54QLMnw8hIW4O0nA7\nU5P3XqYmb2TLnZXuZOmDS3l08aMs2Lkg2+vHx0P79lC8uFWDNwnecCdVJSQkhNjYWLtD8WomyXu4\nvK65Nq3YlBUPreDJZU/y1a9fZXm9uDho1w7q14cpU6Bw4TwM0ompQbvypfZwfvRfarcGe/fupUqV\nKnaH5tWylORFxF9EtonIYsd0KRGJFJE9IrJKREKdlh0pIntFZJeItHOa31hEdjheG+/+j2LkVMPw\nhkQOiOTZlc9mKdGfOgVt21onVydOBD9zqGC4gfOj/86ePUt8fDzly5e3Oyyvl9U/z2eAGCC1GDQC\niFTVGsAaxzQiUgfoA9QBOgAfy9Xedz4BBqtqdaC6iHRwz0fwbfnVf3q9svWuJPrZv86+7nJ//209\nvenuu2H8+PzvJtj0J+/K19vDz8+PP/6wuuSIiIjgySefpFOnThQvXpxmzZpdeQ1g165dtG3bltKl\nS1OrVi3mzp1rV9geJdMkLyI3APcBnwOpf9JdgCmO8SlAN8d4V2CWqiaqaiywD7hdRMKBEFXd7Fhu\nqtM6hocSzms6AAAgAElEQVSoV7Yeq/qvYvjK4cz5bc41r584AffcY9Xh333X9ANvuF9mJ4Rnz57N\nqFGjOH36NNWqVeM///kPAOfPn6dt27b079+fEydO8NVXX/HEE0+wc+fO/Ajbo2WlW4P3geeB4k7z\nyqnqccf4caCcY7wCsNFpuUNARSDRMZ7qsGO+kYmoqKh8PVqrX64+K/uvpN20dghCr7rWMyuPH7eO\n4Lt3t66HtyvB53d7eLq8aA95zT2/XH01e1fwpD76LyDASktpP5eI0KNHD5o0aQLAQw89xHPPPQfA\nkiVLqFq1KgMHDgSsfud79OjB3LlzeeWVV3L5SbxbhkleRDoBf6nqNhFpld4yqtYT0t0ZVERExJWT\nLaGhoTRo0ODKLzz1RFNBmY6OjrZl+yv7r6T99Pb8tvk36hdvxUsvtaJfP7j77ijWry947eGp0zlt\nj4xkNzm7S+qj/1q3bn1lnvNj+8B6qEaqokWLcu6c9QS0AwcOsGnTpiv9tgMkJSXx8MMP53HU+S/1\ndxgVFZW1K49U9boDMAY4COwHjgLngWnALqC8Y5lwYJdjfAQwwmn9FcDtQHlgp9P8fsD/XWebaniG\n6KPRWubtclqhzVx94w27ozHcxVP/xqpUqaJr1qxxmSci+vvvv6uqakREhL700ktXXlu3bp3ecMMN\nqqo6a9Ysbdu2bf4Fa5Pr/e4c89PN4xnW5FX1RVWtpKpVgb7AWlUdACwCBjoWGwh84xhfBPQVkcIi\nUhWoDmxW1WNAvIjc7jgRO8BpHcNDldVbKbZgBfEtnqJ6t2tr9IaRnzSDev3999/Pnj17mD59OomJ\niSQmJrJlyxZ27dqVjxF6puxe/Jbaym8BbUVkD9DaMY2qxgBzsK7EWQ48oVd/M09gnbzdC+xT1RW5\njL1AsOs66KNHrZOsj3VpwIZ/rOS5lc/x4aYPbYnFmS9dF+4Ovt4eaR/tl/bxe6nTISEhrFq1iq++\n+oqKFSsSHh7OyJEjuXz5cr7G64lMtwYezo4TjakJ/uGH4cUXrXmxcbF0mN6BbrW6MebeMfiJPRfH\nmxOvrnLSHqZbA++Vk24NTJI3XKSX4FOdTDhJ51mdqVaqGl90+YLC/vl0m6vhVibJey/Td42RKxkl\neICwYmGseXgNZy6eofOszpy9dDb/gzQMI1tMkvdw+VVzPXIk4wSfqlihYizos4AbS9xIqymtOH7u\n+PUXzgO+XoPOLtMeRmZMkjfYtw9atIBBgzJO8KkC/AL4X6f/0bVmV+788k72/r0374M0DCNHTE2+\ngIuOhvvug9desx7bl12f//w5r6x7hcX9FtO4QmP3B2i4nanJey9z4tXIlu++g5494eOP4YEHcv4+\nC3ctZMjiIczoMYO2N7d1X4BGnjBJ3nuZE68+KK9qrkuWWAl+5szcJXiArrW6Mr/3fPp/3T9bfdLn\nhKlBuzLtYWQmKx2UGT5m2jR4/nkr0bvrgdt33XgXqwes5r6Z9/HX+b8Ydvsw97yxYRi5Yso1Bcz4\n8TBuHKxcCbVru//9D8QdoP309vSo3YPRrUdfc4eiYT9TroE///yTunXrEh8f71XfUVOuMa5L1eoi\neOJEqxafFwke4MbQG9nwyAbW7F/Do4seJSklKW82ZPisyZMnU79+fYKCgggPD+eJJ57gzJkzOX4/\nPz8/goODrzxWsFSpUlSuXJmzZ896VYLPKZPkPZw7aq6qMHIkzJ1rJfgbb8x9XBlJvWnqyLkjbr9p\nytSgXflae4wbN44RI0Ywbtw44uPj2bhxIwcOHKBt27YkJibm+H1/+eWXK48VPHXqVIbLpvbe6CtM\nkvdxKSnwzDOwejVERYFTd9x5KrhwMIv6LqJS8UrcPfluDscfzp8NG14rPj6eUaNG8dFHH9GuXTv8\n/f258cYbmTNnDrGxsUyfPh2AUaNG0bt3bwYOHEjx4sWpV68eW7duzda2YmNj8fPzIyUlBbD623/p\npZdo3rw5QUFB7N+/33ceJ3i9PojtGvDQvq69UVKS6uDBqnfeqRoXZ08MKSkp+uZ3b2ql9yrp9mPb\n7QnCcOGpf2PLly/XgIAATU5Ovua1gQMHar9+/VRV9dVXX9XAwEBdvny5pqSk6MiRI7VZs2bXfV8R\n0X379rnM279/v4rIlW21bNlSb7zxRo2JidHk5GSNi4vTG264QSdPnqzJycm6bds2DQsL05iYGDd+\n4uy73u+OnPYnb3ivxESri4I//rBOspYoYU8cIsKIFiN4p+07tJnahlW/r7InEMPjnTx5krCwsGue\nBgVQvnx5Tp48eWX6rrvuokOHDogI/fv3Z/v27Rm+d6NGjShZsiQlS5Zk+PDh17wuIkRERFC7dm38\n/PxYsWLFlccJ+vn5uTxO0NuYJO/hclJzvXwZ+vSB06dh6VIIDnZ/XNnVt15f5veez4CvB/Dlti9z\n/D6+VoPOrTxpDxH3DNkUFhbGyZMnr5RQnB09epQyZcpcmXZ+DGCxYsW4ePFiuuul2rZtG6dPn+b0\n6dN88MEH6S5TqVKlK+POjxNMHWbOnMnx4/nbV5M7mCTvY86dg27drPGvv4aiRe2Nx9ldN97FtxHf\nMvq70by09iVS9Pp/lIaNVN0zZNMdd9xBkSJFmD9/vsv8c+fOsWLFCu699153fcJ0OV9pU7lyZVq2\nbHllx3D69GnOnj3LxIkT8zSGvGCSvIfLzgMhDh60OhoLD4c5c6BIkbyLK6dqhtXkx8E/sv7Aeu6b\nYd04lR3mgSGufKk9SpQowauvvsrTTz/NypUrSUxMJDY2lt69e1OpUiUGDBiQp9tXpx1Tp06dfOZx\ngibJ+4gt
W+COO6B/f/j8cwjw4HuZywaVZd3AdTQKb0Sj/zVifex6u0MyPMTzzz/PmDFj+Ne//kWJ\nEiVo1qwZN954I2vWrKFQoUJAxo8BTM/1XsvoPYKDg33mcYLmjlcPl5XHu82ZA089ZSX3Ll3yJy53\nWbFvBYMWDuLJpk8yssVI/P38M1zePP7PlXn8X8Hi9jteRSRQRDaJSLSIxIjIm475pUQkUkT2iMgq\nEQl1WmekiOwVkV0i0s5pfmMR2eF4bXyOP6VxhSq8/rrVD01kpPcleIAO1Trw05CfWPX7KjrM6JDv\nDyExDF+X6ZG8iBRT1QQRCQA2AP8CugAnVfUdEXkBKKmqI0SkDjATaApUBFYD1VVVRWQz8JSqbhaR\nZcAEVV2RzvbMkXwWXLwIgwdbD/xYuBDKl7c7otxJSkliVNQoJkVPYnr36dxT9R67Q/JZ5kjee+VJ\n3zWqmuAYLQz4A6exkvwUx/wpgON6DroCs1Q1UVVjgX3A7SISDoSo6mbHclOd1jGy6a+/rEf1JSVZ\nd7F6e4IH62lTb7R+g0ldJ/Hgggd5ff3r5uobw3CDTJO8iPiJSDRwHFinqr8B5VQ19f/q40DqRasV\ngENOqx/COqJPO/+wY76RibTXQcfEQLNm0LYtzJrlWZdIukO7m9ux9bGtRP4RSccZHTlx/oTL6+Y6\neVemPYzMZHoNhqqmAA1EpASwUkTuSfO6iohb//eLiIigSpUqAISGhtKgQYMrJ5dSv9QFZTo6OvrK\ndGQk9OoVxRNPwH//6xnx5dX02oFreWXdK9T5dx1evutlhvUddk17eFK8nvD9yM76hvdK/R1GRUUR\nGxub6fLZurpGRF4GLgCPAq1U9ZijFLNOVWuJyAgAVX3LsfwK4FXggGOZ2o75/YCWqjo0nW2Ymnw6\nPvsMXn7ZupLm7rvtjib/LNu7jEELB/Fss2f5d/N/4yfmqt/cMjV575UXV9eEpV45IyJFgbbANmAR\nMNCx2EDgG8f4IqCviBQWkapAdWCzqh4D4kXkdrEuRh3gtI6RgeRk6+qZsWOtboILUoIHuK/6fWwZ\nsoVFuxfReVZn/k742+6QDMOrZHgkLyL1sU6s+jmGaao6VkRKAXOAykAs0FtV4xzrvAg8AiQBz6jq\nSsf8xsBkoCiwTFXTfT6cOZK/6vx5aN8+ioCAVixYAKVK2R2RfRKTExmxegRzls1hy+gtlA/2gbPN\nbpDT6+QN75XdI3lzM5SHOnYMOnWC0qWjWLy4FYUL2x2RZxj0wSC2FN5CVEQUYcXC7A7HdubmsGsV\nxDYxSd7LxMTA/ffDI4/ASy/lqEM/n6WqjFwzklW/r2LNw2soWbSk3SEZhu1Mkvci69ZB377w7ruQ\nx/0xeS1VZfiK4Ww6vInIAZGEFAmxOyTDsJV5kLeXmD7d6gd+1qyrCd5c8uYqKioKEeGDDh9wa7lb\n6TSrEwmJCZmv6KPM9+Napk1cmSTvAVL7oHnpJetIvnVruyPyfCLCJ50+oUpoFbp+1ZWLSRftDskw\nPJIp19gsMRGGDoXoaFiyxOoL3si6pJQkHlrwEOcvn2dBnwUU9jdnqI2Cx5RrPNTly/DAA9aVNOvX\nmwSfEwF+AUzvPp0AvwC6ftWV+EvxdodkGB7FJHmbJCXBgw9apZqvv77+c1hNfdFVeu1RyL8Q83rP\no2poVe784k5i42LzPS67mO/HtUybuDJJ3gbJyfDww9bNTnPnYq6Bd4MAvwAm3jeRxxo/xp1f3MmP\nB3+0OyTD8AimJp/PUlKsfuAPHoTFi32vF0lPsHTPUiIWRjC+w3gerP+g3eEYRp4z18l7CFXrJOvO\nnbB8OQQF2R2R79pxfAedZ3UmokEEr7Z81dzKb/g0c+LVA6jCsGGwYwcsXZr1BG/qi66y2h71y9Vn\n06ObWLFvBQ8ueJALiRfyNjCbmO/HtUybuDJJPh+oWj1JbtxoHcGHmBs080W54HKsG7iOFE2h44yO\n5sobo0Ay5Zp88PLLVv197dqC3ZOkXZJTknl6+dNsOryJ5Q8tp2xQWbtDMgy3MuUaG40ZA/PnQ2Sk\nSfB28ffzZ+J9E7mv2n3cNeku/jzzp90hGUa+MUk+D33wAUyaBGvWQJkyOXsPU190ldP2EBFeb/06\njzd5nBZftmDXyV3uDcwm5vtxLdMmrjJ9xquRM//7n5Xkv/3W3MnqSYY3G07JwJLcM+UeFvdbTJMK\nTewOyTDylKnJ54GpU+E//4GoKLj5ZrujMdKzcNdChiwewuwHZnNP1XsyX8EwPJipyeejuXPhhRdg\n1SqT4D1Z11pdmf3AbPrM68OU6Cl2h2MYecYkeTdasgSeegpWrIDatd3znqa+6Mqd7XFP1XtYN3Ad\nYzaMYeiSoVxKuuS2984v5vtxLdMmrjJN8iJSSUTWichvIvKriAxzzC8lIpEiskdEVolIqNM6I0Vk\nr4jsEpF2TvMbi8gOx2vj8+Yj2WPdOutxfYsXw6232h2NkVV1y9Zly5AtnEw4SYtJLTgQd8DukAzD\nrTKtyYtIeaC8qkaLSDCwFegGDAJOquo7IvICUFJVR4hIHWAm0BSoCKwGqquqishm4ClV3Swiy4AJ\nqroizfa8ria/fTu0bQuzZ8M9przrlVSV9358j7E/jGVKtym0r9be7pAMI8tyVZNX1WOqGu0YPwfs\nxEreXYDUYuYUrMQP0BWYpaqJqhoL7ANuF5FwIERVNzuWm+q0jtc6cMB66PZHH5kE781EhH/e+U9m\nPzCbRxY9wn/X/5cUTbE7LMPItWzV5EWkCtAQ2ASUU9XjjpeOA+Uc4xWAQ06rHcLaKaSdf9gx32v9\n/Te0bw///jf07p032zD1RVd53R4tq7Rky5AtRP4RSedZnT2+KwTz/biWaRNXWb5O3lGqmQ88o6pn\nnXv1c5Ri3FZjiYiIoEqVKgCEhobSoEEDWrVqBVz9Bdo9fdttrejUCRo2jOKWWwDyZnvR0dEe8Xk9\nZTq/2mPtw2sZtnwYDUc25K1736LX/b084vOb70fm09HR0R4VT15Mp47HxsaSmSxdJy8ihYAlwHJV\n/cAxbxfQSlWPOUox61S1loiMAFDVtxzLrQBeBQ44lqntmN8PaKmqQ9Nsy+Nr8klJ0KMHhIbC5Mng\nZ65R8kmqytvfv80nP33CsgeXUbdsXbtDMox05aomL9Yh+xdATGqCd1gEDHSMDwS+cZrfV0QKi0hV\noDqwWVWPAfEicrvjPQc4reM1VOHxx63ns37xhUnwvkxEGNFiBKNbj6b11NZExUbZHZJhZFtWUlRz\noD9wj4hscwwdgLeAtiKyB2jtmEZVY4A5QAywHHjC6dD8CeBzYC+wL+2VNd5g1CjYtg3mzYNChfJ+\ne87/nhn2tEf/W/ozq+cses/tzawds/J9+xkx349rmTZxlWlNXlU3cP2dQZvrrDMGGJPO/K1A/ewE\n6EnmzLG6LNi48foP3jZ8U+uqrVnz8Brun3k/h+IP8a87/2WeNmV4B
dN3TRbFxEDLllZ3BQ0b2h2N\nYZdD8Ye4b8Z9NLuhGW+1eYtSRU3/0Yb9TN81uRQfb51oHTvWJPiC7obiN/DdoO8AqPlRTd7e8DYJ\niQk2R2UY12eSfCZUISLCutEpIiL/t2/qi648oT1KBJbg086fsmHQBrYc2UKND2vw2dbPSEpJyvdY\nPKE9PI1pE1cmyWdi7Fg4fNjqG94wnNUMq8m83vNY0GcBs36dRb2P6zE/Zj6eWG40Ci5Tk8/A2rXw\n0EOweTNUqmR3NIYnU1VW/b6KEWtGEFQoiE87f0qdMnXsDssoIExNPgcOHrQS/IwZJsEbmRMR2ldr\nz9bHtvJQ/YdoObklr69/ncvJl+0OzSjgTJJPx6VL0KsXPPsstG5tbyymvujK09vDT/x4vOnj/PzY\nz2w+spkmnzZh8+HNma+YQ57eHnYwbeLKJPk0VOHpp6FCBXj+ebujMbxVpRKVWNR3ES/e9SJdZnXh\nuZXPcf7yebvDMgogU5N3omr1KLlunVWPL17cljAMH3My4STPrnyW7//8no/u+4iO1TqaG6kMt8qo\nJm+SvIOq9fDt5cthzRooZe5xMdxsxb4VDF8xnEolKvFu23e5tbx5hJjhHubEaxaMGmU9ozUy0rMS\nvKkvuvLm9uhQrQM7Ht9Bj1o9aD+9PY8sfITD8Ydz9Z7e3B55xbSJK5PkgTfesDocW70awsLsjsbw\nZYX8C/F408fZ/dRuygeX55b/u4VX1r3C2Utn7Q7N8FEFvlzz9tswaRJERUH58vm2WcMA4M8zf/LS\n2pdY/cdqXrzrRR5t9CiBAYF2h2V4GVOTv4733oNPPrESfEWvfhCh4e1+Pvozr61/ja1HtvJC8xcY\n0niISfZGlpmafDomTrQevr12rWcneFNfdOWr7dEovBEL+y5kYd+FrN6/mpsn3MyETRO4kHghw/V8\ntT1yw7SJqwKZ5OfNgzfftBK8uZvV8CSNKzRmYd+FLO63mLX713LzhJsZv3F8psneMK6nwJVrvvsO\neva0+oVv0CDPNmMYbrHt6DZeW/8aW45sYUTzEaaMY6TL1OQdYmKsLoOnT4e2bfNkE4aRJ7Ye2cqo\n9aOIPhbNyBYjGdxwMEUCitgdluEhcvsg7y9F5LiI7HCaV0pEIkVkj4isEpFQp9dGisheEdklIu2c\n5jcWkR2O18bn9kNl15EjcN998O673pXgTX3RVUFtj8YVGrO432Lm957Pkj1LqPFRDT7d+imRayLt\nDs3jFNTvyPVkpSY/CeiQZt4IIFJVawBrHNOISB2gD1DHsc7HcvX+7U+AwapaHajueBh4voiPh44d\nYehQGDAgv7ZqGO53W8XbWPbQMmY/MJv5O+cz4OsBTNw80dTsjevKUrlGRKoAi1W1vmN6F9BSVY+L\nSHkgSlVrichIIEVV33YstwIYBRwA1qpqbcf8vkArVR2azrbcWq65fNk6gq9Z07qaxnQZYviSjYc2\n8taGt9h4aCPDbh/GE02fIDQwNPMVDZ+SF5dQllPV447x40A5x3gF4JDTcoeAiunMP+yYn6dSUuCR\nRyAkBCZMMAne8D3NbmjGN32/Ye3Atez+ezc3T7iZFyJf4OjZo3aHZniIXF9C6Tjs9qyzt1gdjr3w\nAvzxB8ycCf7+dkeUM6a+6Mq0h6vU9qhTpg5Tuk3h58d+5mLSRep+XJfBCwcTFRtFckqyvUHmM/Md\ncRWQw/WOi0h5VT0mIuHAX475hwHnK89vwDqCP+wYd55/3Z6ZIiIiqFKlCgChoaE0aNCAVq1aAVd/\ngZlN//BDK1asgNGjo9i0KfPlPXU6Ojrao+Kxe9q0h+t02vbYH72f7kW78/LTL/Plti95dMKjxF2M\nY0CXAfSr34/ze84jIh4Tf15MR0dHe1Q8eTGdOh4bG0tmclqTfwf4W1XfFpERQKiqjnCceJ0J3IZV\njlkNVFNVFZFNwDBgM7AUmKCqK9LZVq5r8hMnwvvvW9fEh4fn6q0Mw+vtOrmLWTtmMevXWSRrMn3r\n9qX/Lf2pXaa23aEZbpKr6+RFZBbQEgjDqr+/AiwE5gCVgVigt6rGOZZ/EXgESAKeUdWVjvmNgclA\nUWCZqg67zvZyleSnT4eRI+Hbb6Fq1Ry/jWH4HFVl27FtzNoxixk7ZnBzqZv5R+N/0LN2T4oWKmp3\neEYuFJiboRYutC6TXLsWavvIQUpUVNSVf9UM0x5p5bQ9EpMTWbJnCf/b+j+2Ht1K//r9eazxYz5x\ndF8QvyMFooOyNWtgyBDrwR++kuANI68U8i9E99rdWdF/BZsf3UyxQsVoPbU1d0+6m9m/ziYpJcnu\nEA038Ykj+Y0boUsXq+Oxu+/Oo8AMw8clJieyeM9iPtj4AX+e+ZNnmz3LIw0fIaRIiN2hGZnw6XLN\njh3Qpo314I/77svDwAyjANl0aBPjfhzH2v1rGdJoCE/f/jQVQirYHZZxHT5brtm3Dzp0gPHjfTfB\nO18yZZj2SCuv2uP2G25nTq85bB6ymfOJ56n3cT0GLRzE9mPb82R77mS+I668NskfPmx1NPbqq9C3\nr93RGIZvuqnkTUzoOIF9w/ZRvVR17p95P3dPupu5v80lMTnR7vCMLPDKcs3Jk1btPSIC/v3v/InL\nMAyrbv/Nrm/4aMtH/H7qd4Y2GcpjjR+jbFBZu0Mr0HyqJh8fD/fea9Xh33wzHwMzDMPF9mPb+Wjz\nR8zbOY9ONTrRr14/WldtbR5qYgOfqclfuABdu0KTJjBmjN3R5A9TX3Rl2sOVne1xa/lb+azLZ/w+\n7HcahzfmrQ1vUe7dcvSc05Mp0VM4mXDSlrjMd8SV1yT5xETo08fqpmDiRNOjpGF4ilJFSzG82XC+\nHfQtvw/7na41u7JozyJunnAzd0+6m3E/jONQ/KHM38jIE15RrklJgYcfhrg4+PprKFTIpuAMw8iy\ni0kXWbd/HQt2LmDBrgXcWu5W+t/Sn561e1IisITd4fkUr67Jq8KwYfDLL7BiBRQ1XWwYhte5mHSR\nZXuXMf2X6azZv4b2N7dnwC0DaF+tPYX9C9sdntfz6pr866/Dhg2waFHBTPCmvujKtIcrb2mPwIBA\netTuwYI+C9j/zH7a3NSGsT+MJXxcOAO/GcjCXQvd9ghDb2mT/OLRSX7iRJg2zTqCL2H+uzMMn1Cq\naCkea/wY3w76lu1Dt9O0QlPGbxpP+XHleWDOA8z4ZQZxF+PsDtNneGy5ZtYseP55q09402WwYfi+\nkwknWbx7MQt2LWB97HoaV2hM0wpNaVqhKU0qNKFKaBXEXHGRLq+ryS9frgwcaPUsWa+e3REZhpHf\nzl46y4Y/N7D16FZ+OvITW45s4VLSJZpUaEKTCk1oFN6IhuUbmsTv4HVJPixMWbgQ7rzT7mjsVxD7\nxs6IaQ9XBak9jpw9wtYjW9lyZAvbjm0j+lg0Zy+d5dbyt9KgXAMalLeGv377i/Zt2tsdbr7KKMnn\n9BmveWrqVJPgDcNwVSGk
AhVqVqBzzc5X5p1MOMn2Y9uJPhbN2ti1jPtxHHu27qHCrxWoGVaTGqVq\nUDOsJjVL16RG6RpUKlEJP/HoU5Fu55FH8p4Wk2EY3iMpJYkDcQfY/fdudp/cze6/d7Pn7z3sOrmL\ns5fPUjusNnXL1qVuGWuoU6YOlUtU9uqyj9eVa2yPSdXqQ+HsWWs4d+7qz4sXoWxZqFgRKlSAwuYa\nX8PwFqcvnGbnyZ389tdv/HbCGmJOxHD20llql6l9JfHXLWsl/0rFK3lF8veoJC8iHYAPAH/gc1V9\nO83rqiVLQrFi1oXxqT+dx9POK1wY/Pysvg6cBz8/SEiAU6fg779dh1OnrISteu0AEBgIISHWEBx8\ndbxIEfjrL6uv46NHITTUSvgVK0K5clYsAQFXh0KFrJ/+/tbgPJ46BAdDmTKuQ1AQiGSt5pq6Uzp/\n3vq8Fy5Ynz+9bYG1s3LecaXuzK7XHqpWO5ct6zqEhOR7/xLptkdiovU7TUqyrrUNDs7buFJSrG15\nwA6+INXksyonbXL6wmliTsRYid9pB3D+8nnqla1Ho/BGNA5vTJMKTahdpjYBfp5V6faYmryI+AMf\nAW2Aw8AWEVmkqjtdFtyzx0pUFy5cTVqpP9Mbv3zZ+qNLSbk2OQUGQs2aULq0NZQqdXU8MPDaHYPz\nkJmUlKsJ//BhOH7ciiMpyUo8qeOpw+XLkJxsjScnXx0/dw5OnHAdVKFMGaITEmhVokT6ide5LYoU\nsXYMQUHW51K9dlvJyVbcwcFXd1zOO7CM2iMhwfqsqcOJE9bnKVPGSnbOnyd1PCXF2tEULnx1KFLk\n6s9ixawhKMj1Z2DgtW3naM/on36iVdmyVlI/edIaEhKs36u/P5w5Y+2sQkKshF+iBBQvbn3uCxes\n1y5evDp+6ZK1zdBQayhR4up4UJD1fs4HBn//bfWvIWJ1pFSjhvX9Sh1q1LDmJyZeHS5fvjoeGAhh\nYdY23SA6Otok+TRy0iYli5akeeXmNK/c3GX+qQun2HF8B1uPbmX1/tW89f1bHI4/TP1y9Wkc3pgG\n5RtQK6wWtcJqEVYszI2fwn3ye3d0G7BPVWMBROQroCvgmuTDPLOxruHnB+XLW0Pjxu5974QEOHGC\nuHffhWeeST/xpv4nU6yYFUt+u3DharK/3n8pqTu3S5esn6lDaqJN/e/j/Pmr4xcvWv8BFSvm+l9R\nQAW8QmAAAAX/SURBVABxx47Bo49a35HUoXhx18+flGT1SR0fbyXpM2es1wMDrTYLDLw6Xriwtc24\nOGu5uLir42fPWkk/9aAgdShZ0nq/Awdg925riImBb76xxv/6y4q/UCHr/VPHCxWy2uHECStO589Q\nurS1U0ndCTrvEAsXvvrfQ5qDh7gNG6yDIucdeeogYsVaqtTVn6lDYKAVS+qQusO7dMnalvPv0Pn3\nmt5/zKn/NQcGpr/TLlw4X//ji4tz341UpYqWomWVlrSs0vLKvPhL8Ww7uo2tR7fy3Z/f8fnPn7Pr\n5C4C/AKoGVaTWqWtpF+1ZFXCg8MJDwknPDicooXsuWU/v5N8ReCg0/Qh4PZ8jsE7FCsGN95o/fFX\nq2Z3NOkrWhQqV87fbR4/Dp06ZbxMQMDVZJYVwcFW+Sm7brrJGjp2zP66CQlX/xNx/o8kvR3i2bNX\nk21AgJVMU8dLl4a6da/duQYEWIk+Ls76D2T/fti61RpPLVUWKWINgYFXx4sUsRJ22h1G6n9o6f23\nrGrNv3jR+gypO+3Un8nJV3cQzkNqnKn/gQYFWb+L1J9Fi15/p1eo0LWfOXXYvh2mT3fdTur49cqR\ncM0BxZXB39/6fI6heEoKLVNSaJlSBwrXgvIpaJlkziSc5vCZgxzZdYgjcev4/fI8NqTEczD5NAeT\nT5FYtAghpcpTvFQ4IcXDCAosQfGgkpQoUoLiRYpTItD6Wdi/MP7ij7+f/zU/hezvLPM7yXvWWV4v\nEBsba3cIHsVn2qNYMWsHmcudZOwvv8CTT7opqDySWkp1HlJ3GImJ1s7g3Dlrh+D8MyHBej3tTu/8\neWt+ev+9JCdbbRIU5Lqd1PGMSrOpO7O0Q3LytTuo1EEE/P0RPz9CHUNdPz/wC4bEInC+BJwriZ4v\nR8rZeFLOnUYSDiOXLuOflIyKkOwvpPj7kewnJPlDYoAflwv5kRggXA7w43Ih4XKANSigIqg4kqlk\nnlTz9cSriDQDRqlqB8f0SCDF+eSriJgdgWEYRjZ5xNU1IhIA7AbuBY4Am4F+15x4NQzDMNwiX8s1\nqpokIk8BK7EuofzCJHjDMIy843E3QxmGYRju4zGdOIhIBxHZJSJ7ReQFu+PJbyLypYgcF5EdTvNK\niUikiOwRkVUiEmpnjPlNRCqJyDoR+U1EfhWRYY75BbJdRCRQRDaJSLSIxIjIm475BbI9UomIv4hs\nE5HFjukC3R5peUSSd7pJqgNQB+gnIrXtjSrfTcL6/M5GAJGqWgNY45guSBKBZ1W1LtAMeNLxvSiQ\n7aKqF4F7VLUBcAtwj4i0oIC2h5NngBiuXmhS0NvDhUckeZxuklLVRCD1JqkCQ1W/4//buWPXKII4\niuPfFzAgWEkgShKJhaWF2FkYFCwUOazURvInWIiFFrYWFvoHmCJcERCEGLBVsBOEBEFbbQQvTRDF\nMs9iV3PGizbxdtl5HziYm7li7hU/7mZnBrZ2dfeA5bq9DFwZ66QaZvuz7Y26/Y3q0NwMBedi+3vd\nnKR6rrVFwXlImgUuAY/h1ybyYvMYpS1FftQhqZmG5tIm07YHdXsATDc5mSZJmgdOAa8pOBdJE5I2\nqL73S9vvKDgP4CFwG9ge6is5jz+0pcjn6e8/1FdzFpmTpEPAU+Cm7a/DY6XlYnu7Xq6ZBc5KOrdr\nvJg8JF0GNm2vw+ijoCXlsZe2FPlPwNzQ+zmqX/OlG0g6AiDpKLDZ8HzGTtIBqgLft71adxefi+0v\nwHPgNOXmcQboSfoArADnJfUpN4+R2lLk3wAnJM1LmgSuAWsNz6kN1oDFur0IrP7ls52j6iLvJeC9\n7UdDQ0XmImnq504RSQeBC8A6heZh+67tOdvHgevAC9s3KDSPvbRmn7yki+zcM79k+37DUxorSSvA\nAjBFtY54D3gGPAGOAR+Bq7b374q9lqt3jrwC3rLzl/sO1Unp4nKRdJLqQeJE/erbfiDpMAXmMUzS\nAnDLdi95/K41RT4iIvZfW5ZrIiLiP0iRj4josBT5iIgOS5GPiOiwFPmIiA5LkY+I6LAU+YiIDkuR\nj4josB9MSBmuscOXqgAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd4FNX6wPHv2ZJeIKEFCIQSeid0VHoXBEER6ShwFfXa\n9Zaf5Vqv96pXVBABKSJVEVQUAQVFeu8l9NACCaS33T2/P2YSgwJJIMmmvJ/nmWd2z87Mvpsy7845\nZ85RWmuEEEKUPhZ3ByCEEMI9JAEIIUQpJQlACCFKKUkAQghRSkkCEEKIUkoSgBBClFKSAIQQopSS\nBCCEEKWUJAAhhCilbO4O4GbKlSunw8LC3B2GEEIUK9u3b7+stS6f03ZFOgGEhYWxbds2d4chhBDF\nilLqVG62kyogIYQopSQBCCFEKSUJQAghSilJAEIIUUpJAhBCiFIqVwlAKfWkUmq/UmqfUmq+UspL\nKVVDKbVZKXVUKbVQKeVhbutpPo80Xw/LdpwXzfLDSqmeBfORhBBC5EaOCUApVQV4HIjQWjcCrMBQ\n4G3gPa11OHAFGGfuMg64orWuDbxnbodSqoG5X0OgF/CxUsqavx9HCCFEbuX2PgAb4K2UygB8gPNA\nF2CY+fps4GVgCjDAfAywBPhQKaXM8gVa6zTghFIqEmgNbLz9jyGEEMWT06VJSM0gMc1BcrrTWKc5\nSUp3kJTmIM3hwunSOF0ah0vjdLlwuDQul8alQWvQaHONUZBLOSYArfVZpdR/gNNACvAjsB24qrV2\nmJtFAVXMx1WAM+a+DqVUHBBslm/Kdujs+wghRLGmtSYuJYPohDTiUzKIS8kgPjWDuOQM4lMdxKVk\ncDU5g6vJ6VxJTudKcgZXktOJS8nIyzk7X+WYAJRSZTG+vdcArgKLgd7X2TTzI6gbvHaj8j++33hg\nPEC1atVyCk8IIQqF1poL8akcv5TE8ctJnLuawoW4VM7HGesL8amkZrhuuL+Ph5Uy3nbK+HgQ5OtB\n5TLelPXxoKyPnUAfD/w9bfh4WvH1tOHrYcPHw4qfpw0PmwWbVWGzWLBaFDaLwmpRWJTCokAphQKU\n+RhAvZ27z5SbKqBuwAmt9SUApdRXQHugjFLKZl4FVAXOmdtHAaFAlFLKBgQCsdnKM2XfJ4vWehow\nDSAiIsJNeVEIUVpdTU7nVEwyJ2OSOHk5meOXEzl2KZETl5JISndmbWe3KioGeBES6EXjqmXoHuBJ\npUBvKvh7UsbHToCXnUBvOwHedvy9bNitRa/TZW4SwGmgrVLKB6MKqCuwDfgZGAwsAEYBy8ztl5vP\nN5qv/6S11kqp5cAXSql3gcpAOLAlHz+LEELkitaaqCsp7D8Xx4Fz8ZyISeZ0TBInY5KJS8nI2k4p\nqBzoTa0KfkRUD6JWeV9qlfejZnk/Kvh7YrFcr2Kj+MhNG8BmpdQSYAfgAHZifEP/DliglHrNLJth\n7jIDmGs28sZi9PxBa71fKbUIOGAe51GttRMhhChgZ6+msP3UFfafjWPfuTj2nY3POtFbFFQt60P1\nYB/ubhpC9SBfqgf7UD3Yl2pBPnh7lNzOikq7q/UhFyIiIrSMBiqEyKvUDCdbTsSy7sgl1h25RGR0\nIgAeVgt1K/nTqEoADSsH0qhKIPUq+eNlL1kneaXUdq11RE7bFenhoIUQIjdcLs3hiwlsPBbDL0cv\nsel4DKkZLjxsFtrUCGJoq1Da1QomvII/HraiVxfvLkU7AaQlujsCIUQRlO5wsfdsHFtPxrLlRCzb\nTsYSn2r0Sq9Z3pehrapxV93ytK0RXKKrcG5X0U4AMZGwewE0HeruSIQQbpKa4eTwhQQOno83lwT2\nnL2a1eWyZnlf+jYJoVVYEK1rBFG1rI+bIy4+inYC8PSDpRNBu6DZsJy3F0IUa1prTsYks/7oJbac\nvMKBc3GcuJyEy2yq9PGwUq+SP0NbVaNNjSAiwoIo7+/p3qCLsaKdAIJqQs1Q+PoR4/bm5g+6OyIh\nRD67kpTOhmMxrI+8xC9HLnP2agoAIYFeNKwcSN/GIdQPCaB+SADVgnyKfdfLoqRoJwBlgQcWwPwH\nYNmjgIbmw90dlRDiNmQ4Xew8fZVfjlzi16OX2HM2Dq3B39NGu1rBTLyrJh3DyxMW7JN1Z6soGEU7\nAQDYveGB+bBgGCybZFQHtRjp7qiEEHlwJjaZdUcu8cuRS2w8FkNCmgOrRdE8tAx/7VqHjuHlaFo1\nEFsRvFu2JCv6CQCMJDB0Pix8EJY/ZiSBlqPdHZUQ4ga01uw/F8/K/Rf4Yd8Fjpr98KuU8aZf08rc\nVacc7WqVI9Db7uZIS7fikQAA7F5w/zxYOBy+ecJIAhFj3R2VEMLkdGm2n7qSddI/ezUFi4LWNYJ4\noHUD7qpbnprlfKVapwgpPgkAzCTwOSwaCd8+CS4ntH7Y3VEJUapdjE9l3ubTLNhymuiENDysFu4I\nL8cTXcPpWr8CwX7SS6eoKl4JAMwkMBcWj4EVz4AzA9o94u6ohChVtNZsO3WFWRtOsnLfBZxa07lu\nBQY2r0KnuuXx95KqneKg+CUAAJsnDJkFX46DlS+CKwM6POHuqIQo8VLSnSzffZZZG05x8Hw8AV42\nRrcPY0S76lQP9nV3eCKPimcCALB5wODPYOl4WPV/4EyHO591d1RClDgul2bLyVi+2hHFir0XSExz\nULeiP28MbMw9zSvj41F8TyOlXfH+zVltMHAaWGzw02vgdECnF4xBvIUQt+X4pUSW7jzLVzvOcvZq\nCr4eVno3DmFwy6q0qREkjbklQPFOAGAkgXumgMUO694yrgS6/p8kASFuQUxiGt/tPc/XO8+y4/RV\nLAo61C7Hsz3r0qNhRfm2X8KUjN+mxQr9JxvJYP274F8J2kxwd1RCFAvJ6Q5WHbjI1zvP8uvRyzhc\nmroV/Xmxdz0GNKtCpUAvd4coCkhuJoWvCyzMVlQT+D9gjlkeBpwE7tNaX1HGdeH/gD5AMjBaa73D\nPNYo4B/mcV7TWs/On48BWCzQ9z1IjIaVf4OKjSCsQ74dXoiSJM3hZENkDMt3n2Pl/gskpzsJCfRi\n3B01uKdZFeqHBLg7RFEI8jQjmFLKCpwF2gCPArFa67eUUi8AZbXWzyul+gCPYSSANsD/tNZtlFJB\nGHMJRwAa2A601FpfudH73dKMYKlx8GkXYz1+HQRWydv+QpRQKelO1h2J5od9F1hzMJqENAcBXjb6\nNglhQLMqtA4LkoHWSoiCmhGsK3BMa31KKTUA6GSWzwbWAs8DA4A52sgsm5RSZZRSIea2q7TWsWaA\nq4BewPw8xnBzXoEw9AsjCSwaAWO+N7qNClEKpaQ7+fHABb7fe4G1R6JJzXBR1sdO78aV6NWoEh1q\nl8PTJhOmlFZ5TQBD+f2EXVFrfR5Aa31eKVXBLK8CnMm2T5RZdqPy/Fe+Lgycagwb8d3TRvuANAqL\nUiQyOoHPN53myx1RJKQ6qODvyZCWo
fRqVIk2NYJk0DUB5CEBKKU8gP7Aizltep0yfZPyP77PeGA8\nQLVq1XIb3p/Vv9u4L+CXd6BKCxk3SJR46Q4XK/dfYN7mU2w6HovdqujdKIQHWhuTp0j1jvijvFwB\n9AZ2aK0vms8vKqVCzG//IUC0WR4FhGbbrypwzizv9IfytX98E631NGAaGG0AeYjvzzq9COd2wYrn\noEJDqNbmtg4nRFF0Pi6FeZtOs2DrGS4nphEa5M3zveoxJKIq5WQcHnETeUkAD3Btff1yYBTwlrle\nlq18klJqAUYjcJyZJFYCbyilyprb9SDnq4nbY7HCvZ/CtM5Ge8CEX4wuokIUc1prtp68wqwNJ1i5\n/yJaa7rUq8CDbatzV3h5+bYvciVXCUAp5QN0B7J3rn8LWKSUGgecBoaY5SswegBFYnQDHQOgtY5V\nSv0L2Gpu92pmg3CB8i5rNApP72ZMKjPqG/CQMUtE8ZSa4WTZrt/H4gn0tvNQxxoMb1ud0CCZDF3k\nTZ66gRa2W+oGeiOHvjMahWt3MxKCVUYrFMXHlaR0PttwkjkbT3I1OYN6lfwZ1T6Me5pVwdtDevGI\naxVUN9Diq15f6PsufPtXWP443POx9AwSRV50Qiozfj3B55tOkZTupHuDioztUIO2NWUsHnH7Sk8C\nAIgYY9wpvPYN8KsA3V9xd0RCXNe5qylM++U487ecJsPpol+TyjzauTZ1K/m7OzRRgpSuBABw13OQ\neBF+ex/8KspkMqJISUjN4K3vD7Fo2xm0hkEtqvCXTrWpUU7arUT+K30JQCno8w4kXTImk/GrAI0H\nuzsqIdh15iqPzd/BuaupPNA6lIl31aJqWWnYFQWn9CUAMLqHDvoUPo+FpRPBJwhqdXF3VKKUcrk0\nn/56nHdWHqZigBeLJrSlZfUgd4clSoHSez+43QuGzjOGjVgwHM5ud3dEohS6lJDG6FlbefP7Q3Rv\nUJEVj98hJ39RaEpvAgDwLgMPLgHfcjB3EFzY5+6IRCny69FL9P7fr2w+HsNr9zTi4wdbEOgj3ZNF\n4SndCQAgIARGLQe7D8y9By5HujsiUcLFpWTw8vL9jJixhbI+dpZP6sjwttWlW6codJIAAMqGwchl\noDXM6Q9XTrk7IlECuVyahVtP0+U/a5m98SQj21Vn+aSO0rVTuI0kgEzl68DIryE90UgC8efdHZEo\nQXaevsLAj3/j+S/3UqOcL99M6sirAxrJXbzCrSQBZFepMQz/CpIuG9VBSZfdHZEo5i4lpPHM4t0M\n/HgD5+NSef/+Ziye2I5GVQLdHZoQpbQb6M1UjYBhC+Hze2HuQGPwOO8y7o5KFDMZThdzNp7i/VVH\nSHU4mXhXLSZ1qY2fp/zLiaJD/hqvJ6wj3D8P5g+FeYONqwIvmSRb5M6GyMu8/M1+jlxM5K465Xnp\n7gbULO/n7rCE+BOpArqR8G4w5DM4txM+H2RMMi/ETZy7msKj83YwbPpmUjKcfDoyglljWsnJXxRZ\ncgVwM/XvhiGzYfFoozpo+FdSHST+JM3hZPqvJ/jwp0hcWvNU9zqMv7MmXnZp4BVFmySAnNTvB/fP\nhYUjjIbhEUuNSWaEANYducRLy/ZxMiaZXg0r8fe+9WViFlFs5KoKSClVRim1RCl1SCl1UCnVTikV\npJRapZQ6aq7LmtsqpdQHSqlIpdQepVSLbMcZZW5/VCk1qqA+VL6r2xvu/xwu7oc5AyC54CcyE0Xb\n+bgUHpm3nVEzt6CUYs7Y1kwd0VJO/qJYyW0bwP+AH7TW9YCmwEHgBWCN1jocWGM+B2Py+HBzGQ9M\nAVBKBQEvYcwT3Bp4Kdv8wEVf3V5Gw3D0IeM+AUkCpVKG08W0X47R9b/rWHMwmmd61OGHv97BnXXK\nuzs0IfIsxwSglAoA7gRmAGit07XWV4EBwGxzs9nAPebjAcAcbdgElFFKhQA9gVVa61it9RVgFdAr\nXz9NQavTAx74Ai4dgdn9ISnG3RGJQrT5eAx9P/iVN1Ycon2tYFY/dReTuoTjaZO6flE85eYKoCZw\nCfhMKbVTKTVdKeULVNRanwcw1xXM7asAZ7LtH2WW3aj8Gkqp8UqpbUqpbZcuXcrzBypwtbvBsAUQ\ncxQ+6w1xZ90dkShgkdEJTJy7nfunbSIpzejdM31UK6nuEcVebhKADWgBTNFaNweS+L2653quN6KV\nvkn5tQVaT9NaR2itI8qXL6KX1bW6GD2CEs7DzJ4ygFwJFXUlmWcW76bHe7+wPvIyT3arw+qn7qJ7\ng4ruDk2IfJGbBBAFRGmtN5vPl2AkhItm1Q7mOjrb9qHZ9q8KnLtJefEU1gFGfwsZKUYSOLfL3RGJ\nfHI5MY1XvtlPl/+sY/nuc4zrWINfnuvME93CZeweUaLkmAC01heAM0qpumZRV+AAsBzI7MkzClhm\nPl4OjDR7A7UF4swqopVAD6VUWbPxt4dZVnyFNIWxK8HuDbPvhpO/uTsicRsynC4mrznKXf/+mdkb\nTjKoRRXWPtOJv/dtQJCvh7vDEyLf5fY+gMeAeUopD+A4MAYjeSxSSo0DTgNDzG1XAH2ASCDZ3Bat\ndaxS6l/AVnO7V7XWxb8rTbnaRhKYO9C4Y3jILKPbqChWTlxO4q8Ld7H7zFV6N6rEMz3rUkvu4BUl\nnNL6T9XwRUZERITetm2bu8PInaQYY9yg87vhno+h6VB3RyRyQWvNwq1nePXbA9itFt4Y2Ji+TULc\nHZYQt0UptV1rHZHTdnIncH7xDTZmFlswDJZOMNoGIsa4OypxE7FJ6bzw5R5+PHCRDrWD+c+QpoQE\ners7LCEKjSSA/OTpD8MWw6IR8O1fweWA1g+7OypxHWsPR/Pskj3EJWfwj771GduhBhaLTMkoShdJ\nAPnN7mUMG7FoFKx4BlxOaDvR3VEJU3K6g7e+P8ScjaeoW9GfOWNbUz9EhvoWpZMkgIJg84T75sCS\nMfDD88aVQPtJ7o6q1Nt+6gpPL9rFyZhkxnaowXO96sqInaJUkwRQUGweRo+gL8fBj383kkDHv7o7\nqlIp3eHif2uOMGXtMUICvZn/cFva1Qp2d1hCuJ0kgIJktcO9M8EyHla/BK4MuPNZd0dVqhw8H89T\ni3Zz8Hw890VU5Z/9GuDvZXd3WEIUCZIACprVBgOngcUGP71mtAl0utlIGiI/OJwuPv31BO+tOkKA\nt43pIyPoJkM4CHENSQCFwWqDe6YYSWDtm0Z1UOe/g5JeJwXh8IUEnluym91RcfRqWInXBzYi2M/T\n3WEJUeRIAigsFiv0/9BY//IOODOg28uSBPJRhtPFlLXHmPzTUfy97Hw4rDl9G4eg5GcsxHVJAihM\nFgv0+59xJfDb+8aVQI/XJAnkg31n43h2yR4Ono+nf9PKvHR3A/nWL0QOJAEUNosF+r5rJIGNHxpt\nAr3elCRwi1IznEz+6ShT1x0n2NeDaSNa0qNhJXeHJUSxIAnAHZSC3v8Gix02fWT0Dur9jpEcRK
5t\nORHLC1/t4filJO6LqMrf+zQg0Ed6+AiRW5IA3EUp6Pm60Saw4QOjOqjve5IEciEuJYO3vj/E/C2n\nqVrWmzljW8ucvELcAkkA7qQUdH/VuF/g1/9CWiIMnGo8F9f1w77z/N+y/VxOTOPhO2rwZPc6+HjI\nn7EQt0L+c9xNKejyT/DwgzWvQFo8DJkNHjLfbHYX4lL5v2X7+PHARRpWDmDGqFY0rhro7rCEKNZy\nVd+glDqplNqrlNqllNpmlgUppVYppY6a67JmuVJKfaCUilRK7VFKtch2nFHm9keVUqNu9H6ljlJw\nx1PQ7304usqYWCblqrujKjK+33ue7u+tY92RS7zYux7LHu0gJ38h8kFeKpw7a62bZZtk4AVgjdY6\nHFjD7xPF9wbCzWU8MAWMhAG8BLQBWgMvZSYNYYoYA4NnQtQ2mNUPEi66OyK3Sne4eHn5fv4ybwe1\nyvvx45N3MuGuWtis0k4iRH64nf+kAcBs8/Fs4J5s5XO0YRNQxpw0viewSmsdq7W+AqwCet3G+5dM\njQbBsAUQe8yYbP7KSXdH5BZnYpMZ8slGZm04ybiONVg0oR3Vg33dHZYQJUpuE4AGflRKbVdKjTfL\nKpqTvWOuK5jlVYAz2faNMstuVC7+qHY3GLkMUq7AjJ5w8YC7IypUqw9cpN/k9RyPTmTq8Jb8s18D\nPGzyrV+I/JbbRuAOWutzSqkKwCql1KGbbHu9O5r0Tcqv3dlIMOMBqlWrlsvwSqDQ1jDme2Oy+Zk9\nYfBnEN7N3VEVqAyni/+sPMwnvxynYeUAPn6whXzrL2YyMjKIiooiNTXV3aGUCl5eXlStWhW7/dZ6\nDuYqAWitz5nraKXUUow6/ItKqRCt9Xmziifa3DwKCM22e1XgnFne6Q/la6/zXtOAaWBMCp+XD1Pi\nVGwAD62G+Q/AF0Og5xvQZmKJvGv4cmIaj8zbwZYTsQxvW41/9G0gk7UUQ1FRUfj7+xMWFiZjMBUw\nrTUxMTFERUVRo0aNWzpGjtfVSilfpZR/5mOgB7APWA5k9uQZBSwzHy8HRpq9gdoCcWYV0Uqgh1Kq\nrNn428MsEzdTJhTG/gB1+8APL8A3T4Aj3d1R5au9UXHcPXk9u89c5f37m/HaPY3l5F9MpaamEhwc\nLCf/QqCUIjg4+LautnJzBVARWGr+Qm3AF1rrH5RSW4FFSqlxwGlgiLn9CqAPEAkkA2MAtNaxSql/\nAVvN7V7VWsfecuSliacf3DcXfn7NuGEs5hjcPxd8gtwd2W1bujOKF77cSzk/T778S3saVZHuncWd\nnPwLz+3+rHO8AtBaH9daNzWXhlrr183yGK11V611uLmONcu11vpRrXUtrXVjrfW2bMeaqbWubS6f\n3VbkpY3FAl3/DwZ9ClFb4dPOEH2zppiizeF08dq3B3hy4W6aVyvD8kkd5OQv8oXVaqVZs2Y0bdqU\nFi1asGHDhkKPYfTo0SxZsuRP5VprXnvtNcLDw6lTpw6dO3dm//79OR7v66+/5sCB/O8MIl0ripsm\n98Ho7yA9GWZ0h6Or3R1Rnl1JSmf0Z1uZvv4Eo9uHMXdcGxm6WeQbb29vdu3axe7du3nzzTd58cUX\n87S/0+ksoMjgo48+YsOGDezevZsjR47w4osv0r9//xyrcSQBiN+FtoLxP0PZ6kbj8MaPQReP9vID\n5+Lp/9F6tpyI5d+Dm/By/4bY5cYuUUDi4+MpW9a433Tt2rX069cv67VJkyYxa9YsAMLCwnj11Vfp\n2LEjixcvplOnTjz//PO0bt2aOnXq8OuvvwJGcnj22Wdp1aoVTZo04ZNPPgGMb/aTJk2iQYMG9O3b\nl+joaK7n7bffZvLkyfj4GEO99OjRg/bt2zNv3jwA/Pz8srZdsmQJo0ePZsOGDSxfvpxnn32WZs2a\ncezYsXz7+chYQMVVYFUYuxK+Gg8rX4RLB6HPf8Hm4e7IbujL7VH8beleyvp4sHBCW5pXkxvBRf5L\nSUmhWbNmpKamcv78eX766adc7efl5cX69esBmDp1Kg6Hgy1btrBixQpeeeUVVq9ezYwZMwgMDGTr\n1q2kpaXRoUMHevTowc6dOzl8+DB79+7l4sWLNGjQgLFjx15z/Pj4eJKSkqhVq9Y15RERETetBmrf\nvj39+/enX79+DB48OI8/jZuTBFCcefgajcNr3zCmmYw5DvfNAd9gd0d2jTSHk399e4DPN52mXc1g\nJg9rTjmp8inxXvlmPwfOxefrMRtUDuCluxvedJvMKiCAjRs3MnLkSPbt25fjse+///5rng8aNAiA\nli1bcvLkSQB+/PFH9uzZk1W/HxcXx9GjR/nll1944IEHsFqtVK5cmS5duuT6M2mt3dZwLtfexZ3F\nAl3+AYOmG43D07sUqcbhc1dTuO+TTXy+6TQT7qrJ3HGt5eQvCk27du24fPkyly5dwmaz4XK5sl77\nY727r++1Nx16ehp/p1arFYfDARgn68mTJ7Nr1y527drFiRMn6NGjB5Bzj5yAgAB8fX05fvz4NeU7\nduygQYMGfzpGYdxMJ1cAJUWTIRBUAxYMg+ndjEHl6vRwa0i/RV7msfk7SXe4mDq8Bb0ahbg1HlG4\ncvqmXhgOHTqE0+kkODiY6tWrc+DAAdLS0khNTWXNmjV07NgxT8fr2bMnU6ZMoUuXLtjtdo4cOUKV\nKlW48847+eSTTxg5ciTR0dH8/PPPDBs27E/7P/vsszz++OMsXrwYb29vVq9ezfr167PaEipWrMjB\ngwepW7cuS5cuxd/fHwB/f38SEhJu/wfyB5IASpKqEfDwz7DgAZh/P/R5B1o9VOhhaK2Zuu4476w8\nRK3yfkwd0ZJa5f1y3lGIfJDZBgDG3+Ls2bOxWq2EhoZy33330aRJE8LDw2nevHmej/3QQw9x8uRJ\nWrRogdaa8uXL8/XXXzNw4EB++uknGjduTJ06dbjrrruuu/9jjz3GlStXaNy4MVarlUqVKrFs2TK8\nvb0BeOutt+jXrx+hoaE0atSIxMREAIYOHcrDDz/MBx98wJIlS/7UjnCrlC7CvUciIiL0tm3bct5Q\nXCs9CZaMgyPfQ/vHoNurhTbVZHK6g2eX7OG7Pefp2ySEf9/bBF9P+Z5RWhw8eJD69eu7O4xS5Xo/\nc6XU9mxD99+Q/GeWRB6+MHQefP88bJgMV88YU03avQv0bc/EJvPwnG0cuZjAi73rMf7OmnJXqBBF\nmCSAkspiNaqAylaHH/8BCedh6PwC6yH0W+RlHv1iBy6X5rMxrblLJmkXosiTXkAlmVJGFdCQ2XB+\nN8zoZowjlI+01sxYf4KRM7dQwd+T5ZM6yslfiGJCEkBp0PAeGPUNpMYZPYSO/Zwvh03NcPL0ot38\n69sDdKtfga8e6UBYORm/X4jiQhJAaRHaGsatAt/yMPce+P4FyEi55cNdTU5n+PTNfLXzLE91r8OU\nB1viJ429QhQrkgBKk+BaMH4ttJ4Am6fAtE5G1VAeR
V1J5t4pG9gTFcdHw1rweNdwLBZp7BWiuJEE\nUNp4+ECff8PwLyHlKnzaFX59F1y5GwFx/7k4Bn28gUsJacwZ15q+TeTmLlG0ZA4HnbmcPHmSbdu2\n8fjjj7s7tCJHrtlLq9rd4JGN8O2TsOYVOPqj0VW0bNgNd1l/9DITP9+Ov5eNJX9pT52K/oUXrxC5\nlH0soExhYWFEROTYLb7UyfUVgFLKqpTaqZT61nxeQym1WSl1VCm1UCnlYZZ7ms8jzdfDsh3jRbP8\nsFKqZ35/GJFHPkEwZBYM/AQu7oepd8Kp60+esXRnFKM/20LVst589Yic/EXxkn0o6JdffpmxY8fS\nqVMnatasyQcffJC13eeff07r1q1p1qwZEyZMKNC5AYqCvFQBPQEczPb8beA9rXU4cAUYZ5aPA65o\nrWsD75nboZRqAAwFGgK9gI+VUjLxq7spBU2HwsRfwa8CzB0Ih1Zkvay1ZsraYzy5cDetwoJYNLEd\nIYEFe0OZELcjcyiIZs2aMXDgwOtuc+jQIVauXMmWLVt45ZVXyMjI4ODBgyxcuJDffvuNXbt2YbVa\ns8bpL6lyVQWklKoK9AVeB55Sxu2dXYDM0Y5mAy8DU4AB5mOAJcCH5vYDgAVa6zTghFIqEmgNbMyX\nTyJuT9nTikjoAAAgAElEQVQwY36BeYNh4XDo/wFpjR/gb1/t48sdUdzdtDL/GdIET5vkbJFL378A\nF/bm7zErNYbeb910k+tVAf1R37598fT0xNPTkwoVKnDx4kXWrFnD9u3badWqFWAkkgoVKuRb6EVR\nbtsA3geeAzKv+4OBq1prh/k8CqhiPq4CnAHQWjuUUnHm9lWATdmOmX2fLEqp8cB4gGrVquX6g4h8\n4Bts3C+wcDgse5SFa3bw5eWuPNmtDo93rS3DOogSI3OoZ/h9uGetNaNGjeLNN990Y2SFK8cEoJTq\nB0RrrbcrpTplFl9nU53Dazfb5/cCracB08AYDC6n+EQ+8/TjYOdPOXtyNCMTZ9CxsaZm1z5GVZEQ\neZHDN/WipmvXrgwYMIAnn3ySChUqEBsbS0JCAtWrV3d3aAUmN1cAHYD+Sqk+gBcQgHFFUEYpZTOv\nAqoC58zto4BQIEopZQMCgdhs5Zmy7yOKiB/2XeDJhbsI8n6C5rVrUfPATPg6FfpPBqt0GhMlV4MG\nDXjttdfo0aMHLpcLu93ORx99VKITQJ6GgzavAJ7RWvdTSi0GvtRaL1BKTQX2aK0/Vko9CjTWWk9U\nSg0FBmmt71NKNQS+wKj3rwysAcK11jdsZpfhoAuP1pqPfo7kPz8eoVloGaaNaEkFf09Y9zasfRPC\ne8KQz4yRRoW4ARkOuvC5azjo54EFSqnXgJ3ADLN8BjDXbOSNxej5g9Z6v1JqEXAAcACP3uzkLwpP\ncrqD55bs4ds95xnYvApvDmqMl91s7O30gjF8xIpnYHZ/GLaoyM05LIS4NXlKAFrrtcBa8/FxjG/z\nf9wmFRhyg/1fx+hJJIqI0zHJjJ9rjOH/Qu96TLjeGP6txhldRJeMg5k9YPhXxjDTQohiTYaCKMXW\nH71M/4/Wc+5qCp+Nac3Eu2rduKdP/bth5DJIugQzusP5PYUbrBAi30kCKIW01kz75RgjZ26mor8X\n3zyWyzH8q7eDsT+CxQ6f9YHj6wo+WCFEgZEEUMqkpDt5YsEu3lhxiF6NKvHVI+2pHpyHht0K9WDc\nj1AmFD6/F/YuKbhghRAFShJAKXIm1hjG+Zs953i2Z10+Gtbi1iZsD6wCY7435hj4cpwx5aQzI/8D\nFkIUKEkApcSGyMv0/3A9Z64kM3NUKx7tfJt39nqXMRqDI8YZE89/1huuns6/gIW4RVFRUQwYMIDw\n8HBq1arFE088QXp6ep6O0alTJ+rWrZs1ptCSJcaVbvv27QsiZLeRBFDCaa2Z/utxRszcQjk/Y87e\nzvXyaXwTuxf0excGfwbRh2DqHXDou/w5thC3QGvNoEGDuOeeezh69ChHjhwhMTGRv//973k+1rx5\n89i1axe7du1i8ODBAGzY8OfRcovziKGSAEqw1AwnTy7cxWvfHaRb/QosfbQDNQpizt5Gg2DiL8aA\ncguGGYOAOfL2jUuI/PDTTz/h5eXFmDFjAGOcn/fee4+ZM2eSnJzMrFmzGDRoEL169SI8PJznnnsu\nT8f38/MDjOGlO3fuzLBhw2jcuDFQPIeSlgRQQp29msLgqRtYtvscTxfGnL1BNY3G4czpJmf2gCsn\nC+79hLiO/fv307Jly2vKAgICqFatGpGRkQDs2rWLhQsXsnfvXhYuXMiZM2eue6wHH3wwqwooJibm\nT69v2bKF119/nQMHDhTboaRlcJcSaNPxGB6Zt4MMh4vpIyPoWr9i4byxzdOYbjKsIyybBJ/cCQOn\nQd1ehfP+okh5e8vbHIo9lK/HrBdUj+dbP3/D17XW123byl7etWtXAgMDAWP8n1OnThEaGvqnfebN\nm3fTWcRat25NjRo1AIrtUNKSAEqYJdujePGrPYQG+fDpyAhqlfcr/CAa9IeQJrBwBMy/H+54Gjr/\nHSwyl4AoWA0bNuTLL7+8piw+Pp4zZ85Qq1Yttm/fft2hoG+Fr+/v1anFdShpSQAlhMuleXfVET78\nOZIOtYP5+MGWBHrb3RdQ2TAYtwq+fxZ+/S9EbYN7Z4BfLm44EyXCzb6pF5SuXbvywgsvMGfOHEaO\nHInT6eTpp59m9OjR+Pj4FOj7FsehpKUNoARIzXDy+IKdfPhzJPdHhDJrTGv3nvwz2b2MYaQHfARn\nNhtVQqc3uzsqUYIppVi6dCmLFy8mPDycOnXq4OXlxRtvvFGg75t9KOkmTZrQvXt3zp8/X6DvmR/y\nNBx0YZPhoHMWk5jGw3O2seP01RsP5lYUnN8Di0ZAXBT0eB3aTJBJZkogGQ668N3OcNByBVCMRUYn\nMvDjDew/F8+UB1vcfDA3dwtpAuPXQXgP+OF5WDIG0hLcHZUQpZokgGJq8/EYBn38G8npThZOaEfv\nxiHuDiln3mXg/nnQ9SU4sAymdYbog+6OSohSK8cEoJTyUkptUUrtVkrtV0q9YpbXUEptVkodVUot\nVEp5mOWe5vNI8/WwbMd60Sw/rJTqWVAfqqT7Yd95RszcQnl/T5Y+0p5moWXcHVLuWSxwx1PG0NKp\nV+HTLrBnsbujEqJUys0VQBrQRWvdFGgG9FJKtQXeBt7TWocDV4Bx5vbjgCta69rAe+Z2KKUaYMwO\n1hDoBXyslJJ+gXn0+aZTPDJvBw0rB7BkYntCgwquZ0OBqnEnTPgVQprBVw/Bd0+DI83dUYl8UJTb\nFUua2/1Z55gAtCHRfGo3Fw10ATLHAp4N3GM+HmA+x3y9qzIqpgcAC7TWaVrrE0Ak15lRTFyf1kY3\nz398vY/OdSvwxUNtKevr4e6wbk9ACIxaDu0fg63TYWYvGVCumPPy8iImJkaSQCHQWhMTE4OXl9ct\nHyNX9wGY
39S3A7WBj4BjwFWtdeYdFFFAFfNxFeCMGaBDKRUHBJvlm7IdNvs+4iYcThf/XLaf+VtO\nM6RlVd4c1BibtYQ031jt0OM1CG0DXz8CUzvC3R9Aw3ty3lcUOVWrViUqKopLly65O5RSwcvLi6pV\nq97y/rlKAObk7c2UUmWApcD1+nllpvzrdUPRNym/hlJqPDAeoFq1arkJr0RLzXDy+Pyd/HjgIo92\nrsUzPeoW3Z4+t6P+3VCxIXz5ECweBZHDodfb4OmGO5nFLbPb7VnDI4iiL09fI7XWVzEmhW8LlFFK\nZSaQqsA583EUEApgvh4IxGYvv84+2d9jmtY6QmsdUb586b5rNCE1g5Ezt7Dq4EVevrsBz/asVzJP\n/pmCasLYlXDHM7BznnHj2Nkd7o5KiBIrN72Aypvf/FFKeQPdgIPAz8Bgc7NRwDLz8XLzOebrP2mj\nQnA5MNTsJVQDCAe25NcHKWmuJKXz4PTN7Dh1hf8Nbc7oDqXkW5XVDl3/CaO/BUeqMQH9+vfA5XJ3\nZEKUOLm5AggBflZK7QG2Aqu01t8CzwNPKaUiMer4Z5jbzwCCzfKngBcAtNb7gUXAAeAH4FGzakn8\nQXRCKkOnbeLQhQQ+GdGS/k0ruzukwhfWEf7yG9TrB6tfhjn9jbuIhRD5RoaCKGKiriQzfPpmohPS\n+HRkBB1ql3N3SO6lNeyaByueM0YT7fkGNB8uw0gIcRMyFEQxdOJyEvdN3UhMUjpzx7WRkz8YJ/rm\nw42rgUpNYPkkmDcE4v/UfCSEyCNJAEXEoQvxDJm6kTSHiwXj29Kyell3h1S0BNWAUd9A73/DyfXw\nUVvY9YVxhSCEuCWSAIqA3Weucv8nm7BZFAsntKNh5UB3h1Q0WSzGKKJ/+Q0qNoCv/wLzh0J80R92\nV4iiSBKAm206HsOwTzcR4G1j8cR21K4g/d5zFFwLRn8HPd+E42vh4zaw83O5GhAijyQBuNHaw9GM\nmrmFSoFeLJ5QjMf1cQeLFdo9AhPXQ4UGsOxRmDsQrpxyd2RCFBuSANzk+73neXjONmqV92PRhHZU\nCrz18TxKtXLhMHoF9PkPRG2Fj9vBpqngkh7GQuREEoAbLNkexaNf7KBxlUDmj29LsJ9nzjuJG7NY\noPXD8MgmqN7emHBmZi+4dNjdkQlRpEkCKGRzN57kmcW7aVcrmLnj2hSNuXtLijKh8OBiGDgNYiKN\ngeV+fVeuBoS4AUkAhWjqumP8c9l+utWvwIxRrfD1zNVYfCIvlIKm98OjW6BuH1jzinE1EHPM3ZEJ\nUeRIAigkH6+N5K3vD9GvSQhThrfEyy5z4RQov/IwZBbcOwMuHzauBrZ8Kj2FhMhGEkAh+GTdMf79\nw2H6N63M+/c3w15SxvIv6pSCxoONtoFq7WDFM/D5IIg76+7IhCgS5ExUwD795Thvmt/8372vacmZ\nyKU4CagMw7+Evu/C6U0wpR3sWSRXA6LUk7NRAZr+63FeX3GQvo1DeP/+ZnLydyeloNU4476B8vXg\nq4dh4XBIjHZ3ZEK4jZyRCsjM9Sd47buD9G5UifeHysm/yAiuBWO+h+7/gqOr4KM2sO8rd0clhFvI\nWakAzPrtBK9+e4CeDSvywQPNpc6/qLFYocPjMPFXKBsGS8bAolGQdNndkQlRqOTMlI+01kz75Rgv\nf3OA7g0qMvmBFnLyL8rK14Vxq6DrS3B4hXE1cGBZzvsJUULkZkrIUKXUz0qpg0qp/UqpJ8zyIKXU\nKqXUUXNd1ixXSqkPlFKRSqk9SqkW2Y41ytz+qFJq1I3eszhyujQvLd/PGysO0adxJT4a1gIPm5z8\nizyrDe54Csavg8CqsGgkfD4YTm10d2RCFLjcnKEcwNNa6/oYk8E/qpRqgDHV4xqtdTiwxnwO0Btj\nvt9wYDwwBYyEAbwEtAFaAy9lJo3iLjndwYS525iz8RTj76zJhw/Iyb/YqdgAHloN3V6Bczvgs14w\ns7fRTiC9hUQJleNZSmt9Xmu9w3ycgDEhfBVgADDb3Gw2cI/5eAAwRxs2AWWUUiFAT4z5hGO11leA\nVUCvfP00bhCdkMr9n2zip0PR/GtAQ/7Wpz4Wi0xXWCxZ7dDxr/DXvdDrbbh6GuYNhk/ugH1fypAS\nosTJ09dUpVQY0BzYDFTUWp8HI0kAFczNqgBnsu0WZZbdqLzYOnoxgYEfbSAyOpFPR0Ywol2Yu0MS\n+cHDF9pOhMd3woCPwZEGS8bChxEQucbd0QmRb3KdAJRSfsCXwF+11vE32/Q6Zfom5X98n/FKqW1K\nqW2XLl3KbXiFbsOxywyasoF0p4tFE9rRtX5Fd4ck8pvNA5o/CI9shvvmgrIadxIvmwSpce6OTojb\nlqsEoJSyY5z852mtMztNXzSrdjDXmXfURAGh2XavCpy7Sfk1tNbTtNYRWuuI8uXL5+WzFJpfjlxi\n9MytVArwYukj7WlcVaZwLNEsFmjQ37iJrMNfYdc8Y07iIz+6OzIhbktuegEpYAZwUGv9braXlgOZ\nPXlGAcuylY80ewO1BeLMKqKVQA+lVFmz8beHWVasbDkRy/i526hVwY/FE9tRtazM4lVq2L2g+ytG\nY7FXIHwxBJZOhORYd0cmxC3JzRVAB2AE0EUptctc+gBvAd2VUkeB7uZzgBXAcSAS+BR4BEBrHQv8\nC9hqLq+aZcXG7jNXGTtrK1XKeDN3XGvK+Hi4OyThDlVawoR1cOezxphCH7eF/V9LbyFR7ChdhP9o\nIyIi9LZt29wdBgAHz8czdJo5efuE9jKFozCc323MR3xhL1TvAD1fh8rN3R2VKOWUUtu11hE5bSed\n1XPh+KVERszYjLfdyhcPtZWTv/hdSFN4eC30e8+YgnJaJ6NaSIacFsWAJIAcnIlN5sHpmwGY93Ab\nQoOkzl/8gdUGEWONbqMdnzQGl5vcEn5+A9IS3R2dEDckCeAmLsan8uD0zSSlOZgztg21yvu5OyRR\nlHkFQLeXYdJWqNcH1r1tJIK9S6R9QBRJkgBu4EpSOsOnbyYmMY3ZY1vToHKAu0MSxUXZ6jB4pjHQ\nXEAIfDkOvrjPuLNYiCJEEsB1JKU5GDNrK6dik/l0VATNq5WIIYtEYQttDQ+tgV5vwcnfjNFGN34E\nToe7IxMCkATwJ2kOJxPmbmdP1FUmP9Cc9rXKuTskUZxZrND2L/DoZgi7A1b+DaZ3NXoPCeFmkgCy\ncbo0Ty7cxfrIy7x9bxN6Nqzk7pBESVEmFIYthMGfQfw5mNYZfvwnZKS4OzJRikkCMGmt+fvSvazY\ne4F/9K3PkIjQnHcSIi+UgkaDYNIWY4yhDR/A1I5werO7IxOllCQA09s/HGbB1jNM6lybh+6o6e5w\nREnmXRb6T4YRX4MjHWb2hJV/h/Rkd0cmShlJAMAn644xdd0xHmxTjad71HF3OKK0qNUZHtkAEWNg\n44fG1YDMRCYKUalPAEt3RvHm94fo1ySEVwc0whj7TohC4ulv3EU8c
jm4MuCz3vDDi5B6sxHXhcgf\npToBbDh2meeW7KFdzWDeva8ZVpnJS7hLzbvgLxuh1TjY9DG8Wx9WPAuXj7o7MlGCldoEcPRiAhPm\nbics2JepI1rKHL7C/Tz9oO9/YfxaqNcPts8yZiGbOwiOrASXy80BipKmVI4GGp2QysCPjNm8lj7S\nXsb0F0VTYrSRBLbOgMQLEFQTWj0MzYaBdxl3RyeKMBkN9AaS0x08NHsbsUnpzBgVISd/UXT5VYC7\nnoMn9xlDS/iWh5Uvwn/rwfLH4Pwed0coijmbuwMoTE6X5vH5u9h3No5pIyJoUlW+RYliwGqHRvca\ny7ldsG0G7FkMO+ZAaBto9RA0GAA2T3dHKoqZ3EwJOVMpFa2U2petLEgptUopddRclzXLlVLqA6VU\npFJqj1KqRbZ9RpnbH1VKjbreexUkrTX/+vYAqw9e5OX+DenWQCZxF8VQ5WbGPQRPH4Seb0LSZfjq\nYXi3Aax6CWKOuTtCUYzkpgpoFtDrD2UvAGu01uHAGvM5QG8g3FzGA1PASBjAS0AboDXwUmbSKCwz\nfzvJrA0neahjDUa2CyvMtxYi/3mXhXaPwKRtMGIpVGsLGybD5BYwq59xhZCR6u4oRRGXYxWQ1voX\npVTYH4oHAJ3Mx7OBtcDzZvkcbbQsb1JKlVFKhZjbrsqcA1gptQojqcy/7U+QCz8fjub17w7Qq2El\n/tanfmG8pRCFw2KBWl2MJf487JpnVA199ZCRJJoMhRYjoWIDd0cqiqBbbQOoqLU+D6C1Pq+UqmCW\nVwHOZNsuyiy7UfmfKKXGY1w9UK1atVsM73eR0Qk8/sVO6lUK4N37m2KRvv6ipAoIgTufgY5Pwclf\nYPtso71g8xQIrg11e0PdvsYw1Raru6MVRUB+NwJf7+yqb1L+50KtpwHTwOgGejvBXE1O56HZ2/C0\nW/h0VAQ+HqWqzVuUVhYL1OxkLEkxsP8rOPw9bJpqVBP5BEOdXlC3j3Hl4CE94UqrWz0jXlRKhZjf\n/kOAaLM8Csg+jGZV4JxZ3ukP5Wtv8b1zJcPp4tEvdnDuairzx7ehShnvgnw7IYom32Bo/bCxpMZD\n5Go4vAIOfWtUF9l9jCuDhoOgdjewe7k7YlGIbjUBLAdGAW+Z62XZyicppRZgNPjGmUliJfBGtobf\nHsCLtx52zl779gC/RcbwzuAmtKweVJBvJUTx4BVgDEfdaBA4M+DUBjjwNRxYBvu+BM8AqNfXSAY1\nO4HNw90RiwKWYwJQSs3H+PZeTikVhdGb5y1gkVJqHHAaGGJuvgLoA0QCycAYAK11rFLqX8BWc7tX\nMxuEC8K8zaeYvfEUD3WsIeP6C3E9Vrsx/lDNu6D3O3BiHez7Cg59A7vnGw3IzYcbdx6Xre7uaEUB\nKXFDQWw8FsOIGZvpULscM0e3kgHehMgLRzoc+8lIAge/AbTRVtBmIoR1NCa1EUVeboeCKFGtopHR\niTwybzvVg32YPKy5nPyFyCubB9TtZSxxUcY4RNtnGW0GFRtBmwnQeAjYpU2tJCgxYwHtjYrjvk82\nYrUopo9qRYCX3d0hCVG8BVaFbi/BUweMu4/BGIPov/Xg+xcg+qB74xO3rURUAW06HsNDs7cR6G3n\n84faUKOcbyFEJ0QpozWcXA/bZhrVQ64MCG0LLUdDw3vkqqAIyW0VULFPAKsPXOSRL3ZQLciHueNa\nExIof4RCFLiky0Y7wfZZEBMJXoHQ5H4I7wFVI4xGZOE2pSIBLN0ZxTOL99CocgCfjWlNkK90WxOi\nUGkNp34z7jo+sAycaUZ5ubrGHcehrY0RS4PDjRvURKEo8Qlg9oaTvLR8P+1qBvPpqAj8PEtUe7YQ\nxU9aIpzbAWe2GEvUFki5Yrx2zd3HncFDqmkLUonuBTR5zVH+u+oI3RtUZPIDzfGyy7gmQridpx/U\nuNNYwLg6iIk0ksHxtb/ffWzzMm40q9vHuAvZr8JNDioKUrFLAO+vPsL7q48yqEUV/n1vE2xWuawU\nokhSCsqFG0vzB3+/+/jwCji0Ao78AN8oqNjQGM46tK2xLiM3bxaWYlUF9L/VR3lv9REGt6zKv+9t\nIiN7ClFcaQ0X9xuD1J36DaK2Qnqi8VpAFTMhtIFKTYwE4RXg3niLmRJXBfTBGuPkf2+LqrwtJ38h\nijeloFIjY+FZcDogej+c3gynN8Kpjcb4RJnK1oBKjSGkiZEUqkQYA92J21IsEsCHPx3l3VVHjGqf\nwU3kDl8hShqrDUKaGkub8cYVQvw5uLgPLuyBC3uN5eBycwdlbJs5GU5oGxm87hYU+SqgMe8s4J2V\nhxnUvArvDGkqJ38hSrO0BLiwD06th8ifjJ5GLgfYfY2ximp1hrA7oEKDUt3ttER0A61Wt7G2DHyL\ngc2r8B85+Qsh/ig13rg7+dhPxhJ7zCj3KgPV2/++VGpqXGWUEiUiAXiGhOu/vLeI/97XTE7+Qoic\nXT1t9DQ6ud5YZyYEDz+o0sIY0K5iQ2MpX7/EToBTIhJAcK1Q/cG3/8ZusWJRFqzKilIKq7KS4cog\nKSOJ5IxkkhzGOsWRQnJGMsmO5N+fZ3us0fjYfPCx++Bt88bHZqy9bd542jzxtHriYfHAw2osnlZP\nvGxe+Hv442/3x9/DHz8PP/zt/vjafclwZZCYkUhieqKxNh+nOdOwWWzGcSwe2C32rGM6XU7SnGlZ\nS6ojlXRXOhnODGwWGzaLDbvFbixWOzaLDR+bD4GegcbiYay9bLn/w9Vak+pMJS4tjqSMJPw9/An2\nCsaay3lhUx2ppDnT0FrjwoVLu4zH2oVSCj+7H942b5QMFSyKmvjzcHoDnPwNzu00BrBzpBivKYsx\nV3KFBhBUw+h9FBhqDIIXWNUY3qKY/k0X2QSglOoF/A+wAtO11m/daFvvGt669su1czymh8UDH7sP\nvnZf48T+hxN85nOAFEdKVqLITBApjhTSnemkOdNId6ZnPc5wZeTTp85/nlZPAjwC8LB6/J4slA27\n1UgeTpeT+PR44tLiiE+P/9NnsSorwd7BVPSpSAWfCpT3Lo+v3ZeraVeJSY0hNiXWWKfGkpL5D3MT\nVmXNSo6ZidLT6olFWbBgQSmFRVlQGGsvm1dWgvWyel2zzkzKmY+9bF54W73JcGVckzzTnGmkOdJw\naEfWcbPW2d7vehQKD6tHVhyZS+Z7+tp9sVlyrjJwaRcpjhSc2pn1BeJGidDhclzz96fRBHgEEOAZ\ngN0io9cWCpcTYk8YjcsX9xtL9H6IO2sMbpedhx+UqW7ey1DHWMrXMZJGEb+TuUgmAKWUFTgCdMeY\nJ3gr8IDW+sD1tm/esrlevX41Lu3CqZ3XrDNP+j42H+zWgvnncWkXqY5UEtITSMxIJCE94ZrHHlYP\n42rAwzfrqsDPww8vqxcZrgwjmZjf7tNdRmKxWqx4Wb2Mk4/VK+vKI/OkneHKyFocLgcZzgySHcnE\npcVxNe0qcelxxkk9LZ74
9HjSnel/3seVgQULAZ4BWSeYQI9AAjwD8LP7EZ8Wz8Xki0QnR/++pEST\nkpFCWa+yBHkFGYt3UNZjL6sXSqmsE2zmSdblcmVd/WT/2SSmJ5LuTMfF71cLmY8dLgfpznRSnamk\nOlJJdabi0q4C+R3eDm+bN352P3ztvvh7+ONl8yIlI4UkRxJJ6UlZV56aa/+H7Ba7kQysHtgsNtKd\n6SRnJJPuSr/he/nYfP70e/L3MP+m7H74efhlXWlZlTXrd5D52GqxYlXW39fmY5uy/anMqqzYLLas\nfTMfZ98eICkjKetvPj7d+HtLSE8g3Zme9cUj6yrXanzmsp5lCfYOJsAjoHhdEbpckBRtzIEQd8Zc\nRxnJIuYoXDkJ2f9GA0PBPwR8yxvdUX3KgW85Y+0TbNwV7eFrJBEPP+O5zavQriiK6n0ArYFIrfVx\nAHPu4AHAdRNA5rdUd7Eoi5Fk7D5UpGLBv5/VUmDJLDe01m75p81MCinOFCMhOFKzvimnOFKykkTm\nCScrgdqMtV3Z0fyeZNBkfVm4EZd2ke5KJ82RRqozNSshpTnSSHYkZ1XnJWUkZT1OcaQQ6BVIZVtl\n/Dz88LH54Ofhh6/NF4uyZCX57FeS6a50PK2e170qBbISefartfi0eM4mns06ASdlJN30sxRFdoud\nct7lKOddjmDvYPzt/sbP2/zZZP8ZZX4pyrza87R64m3zzroqykywWuus37PD5fhzVar5ZSjzb+OP\nV5YeVo9rEl1m4rSpP1S9etixVwzDFlIbq+qKRqMd6ejEC+j48+iE85BwAWtaHB7xR/GI3o49LR4P\npwMPDTatsaGxarCisWmjusOKBYunP/iUBa+y4F3GWPuUAa9AbDYf7DZv7HYvrDYvsHqAzROU1Uwc\n6jrr2/s9FXYCqAKcyfY8CmPyeFEEuOsbm1Lq/9u7nxCryjCO49/fjHMxKi3/RWimgQtdlG1CsIVJ\nhJVUi4KiwEXgpsCgCGsTBS7aWJs2UqKLiqSyJIQSM2plaRoqJhZIhOJFa1Bp/txzz9Pifc/1zjg2\nym3OPdz3+YyXc8977x3feYb3POd93znnDUNX/QPMqPkVn+OZGUPZUJjzyv6haU3MbGyvOA/bpjVp\n5k0yy8JreZMszy6/Fl9vWlt5235ueeuzRS9kRm3GmG2tv3a5h9vWAx3OhhkcGeTc0LnW4/zQec5c\nOnWfnmgAAATGSURBVMPJxsnWvFoxzzajFoa+MgsH8wujF6gP1VsnAUWPScWXLm+n9U0b05Ou9deY\nOX0mAxpgNB9lOBvmfON8K8EPZUM0mo0rYtSx6cD0GjDnOj7UAOqQ1eES4TGOzBgwxiSSPqDfii30\nYcQ0EB6xI3o9rbjsBDBR3cb0nyWtB9YDLFy4sIw6OVdpklo9Uff/KnoTxdBp+9BrI29cMb8U/oXD\nWDNvhiHemAyLod5G3hiTWItkk+UZVxtyt7xBlo2QZSM0siEazRGy5iij2TC5NWlaTt5K+Hlra0X/\nKPaOjKLHdOKafv6yE8CfQPudnhYAp9vfYGZbgC0QLgQrr2rOudT0qa/1F3q9ZDPbrul9ZV8q9xOw\nRNJiSTXgaWDXJJ9xzjk3BUrtAZhZJulF4GvCvMhWMztWZh2cc84FpV8bbWa7gd1l/7/OOefGSvdu\nSc45lzhPAM45lyhPAM45lyhPAM45lyhPAM45l6hK3w5a0kWu9ZK23jYHONftSlSAx8FjUPA4BFeL\nw51mNneyD1d9iZwT13JHu14n6YDHweMAHoOCxyHoNA4+BOScc4nyBOCcc4mqegLY0u0KVITHIfA4\neAwKHoegozhUehLYOefc1Kl6D8A559wUqWwCkLRG0glJv0na2O36lEXSVkl1SUfbymZJ2iPpZNze\n2s06TjVJd0jaJ+m4pGOSNsTy1OIwXdKPkn6JcXgzli+WtD/G4ZN4a/WeJqlf0iFJX8X9FGNwStIR\nSYclHYhlHbWJSiaAuHj8e8DDwDLgGUnLulur0mwD1owr2wjsNbMlwN6438sy4GUzWwqsAF6Iv//U\n4jACrDaze4DlwBpJK4C3gXdiHP4Gnu9iHcuyATjetp9iDAAeMLPlbX/62VGbqGQCoG3xeDMbBYrF\n43uemX0P/DWu+HFge3y+HXii1EqVzMzOmNnP8flFQsOfT3pxMDMrVowdiA8DVgOfxvKej4OkBcCj\nwPtxXyQWg//QUZuoagKYaPH4+V2qSxXcZmZnIBwcgXldrk9pJC0C7gX2k2Ac4tDHYaAO7AF+BwbN\nWiuap9A23gVeBfK4P5v0YgAh+X8j6WBcOx06bBNVvRJ40sXjXe+TdBPwGfCSmV0IJ35pMbMmsFzS\nLcBOYOlEbyu3VuWRtBaom9lBSauK4gne2rMxaLPSzE5LmgfskfRrp9+wqj2ASRePT8xZSbcDxG29\ny/WZcpIGCAf/D83s81icXBwKZjYIfEeYE7lFUnHy1uttYyXwmKRThKHg1YQeQUoxAMDMTsdtnXAy\ncB8dtomqJgBfPH6sXcC6+Hwd8GUX6zLl4hjvB8BxM9vc9lJqcZgbz/yRdAPwIGE+ZB/wZHxbT8fB\nzF4zswVmtohwHPjWzJ4loRgASLpR0s3Fc+Ah4CgdtonKXggm6RFCpi8Wj9/U5SqVQtLHwCrCXf7O\nAm8AXwA7gIXAH8BTZjZ+orhnSLof+AE4wuVx39cJ8wApxeFuwsReP+FkbYeZvSXpLsLZ8CzgEPCc\nmY10r6bliENAr5jZ2tRiEH/enXF3GvCRmW2SNJsO2kRlE4BzzrmpVdUhIOecc1PME4BzziXKE4Bz\nziXKE4BzziXKE4BzziXKE4BzziXKE4BzziXKE4BzziXqXxgfx7Dp7r/iAAAAAElFTkSuQmCC\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x109efdbe0>"
+ "<matplotlib.figure.Figure at 0x11c0ffdd8>"
]
},
"metadata": {},
@@ -342,23 +324,19 @@
{
"cell_type": "code",
"execution_count": 9,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
- "param_set = dict(height=50, # Height and width are constant\n",
- " width=50,\n",
- " # Vary density from 0.01 to 1, in 0.01 increments:\n",
- " density=np.linspace(0,1,101)[1:])"
+ "fixed_params = dict(height=50, # Height and width are constant\n",
+ " width=50)\n",
+ "# Vary density from 0.01 to 1, in 0.01 increments:\n",
+ "variable_params = dict(density=np.linspace(0,1,101)[1:])"
]
},
{
"cell_type": "code",
"execution_count": 10,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"# At the end of each model run, calculate the fraction of trees which are Burned Out\n",
@@ -369,13 +347,12 @@
{
"cell_type": "code",
"execution_count": 11,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"# Create the batch runner\n",
- "param_run = BatchRunner(ForestFire, param_set, model_reporters=model_reporter)"
+ "param_run = BatchRunner(ForestFire, variable_parameters=variable_params, \n",
+ " fixed_parameters=fixed_params, model_reporters=model_reporter)"
]
},
{
@@ -388,15 +365,13 @@
{
"cell_type": "code",
"execution_count": 12,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 100/100 [00:08<00:00, 4.78it/s]\n"
+ "100it [00:06, 6.45it/s]\n"
]
}
],
@@ -414,9 +389,7 @@
{
"cell_type": "code",
"execution_count": 13,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [],
"source": [
"df = param_run.get_model_vars_dataframe()"
@@ -425,77 +398,76 @@
{
"cell_type": "code",
"execution_count": 14,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
"text/html": [
- "<div style=\"max-height:1000px;max-width:1500px;overflow:auto;\">\n",
+ "<div>\n",
+ "<style>\n",
+ " .dataframe thead tr:only-child th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: left;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
- " <th>BurnedOut</th>\n",
- " <th>Run</th>\n",
" <th>density</th>\n",
- " <th>height</th>\n",
- " <th>width</th>\n",
+ " <th>Run</th>\n",
+ " <th>BurnedOut</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
- " <th>0</th>\n",
- " <td> 0.933628</td>\n",
- " <td> 63</td>\n",
- " <td> 0.64</td>\n",
- " <td> 50</td>\n",
- " <td> 50</td>\n",
+ " <th>6</th>\n",
+ " <td>0.01</td>\n",
+ " <td>0</td>\n",
+ " <td>0.000000</td>\n",
" </tr>\n",
" <tr>\n",
- " <th>1</th>\n",
- " <td> 1.000000</td>\n",
- " <td> 86</td>\n",
- " <td> 0.87</td>\n",
- " <td> 50</td>\n",
- " <td> 50</td>\n",
+ " <th>98</th>\n",
+ " <td>0.02</td>\n",
+ " <td>1</td>\n",
+ " <td>0.000000</td>\n",
" </tr>\n",
" <tr>\n",
- " <th>2</th>\n",
- " <td> 0.793289</td>\n",
- " <td> 58</td>\n",
- " <td> 0.59</td>\n",
- " <td> 50</td>\n",
- " <td> 50</td>\n",
+ " <th>73</th>\n",
+ " <td>0.03</td>\n",
+ " <td>2</td>\n",
+ " <td>0.030303</td>\n",
" </tr>\n",
" <tr>\n",
- " <th>3</th>\n",
- " <td> 1.000000</td>\n",
- " <td> 81</td>\n",
- " <td> 0.82</td>\n",
- " <td> 50</td>\n",
- " <td> 50</td>\n",
+ " <th>75</th>\n",
+ " <td>0.04</td>\n",
+ " <td>3</td>\n",
+ " <td>0.021053</td>\n",
" </tr>\n",
" <tr>\n",
- " <th>4</th>\n",
- " <td> 0.052219</td>\n",
- " <td> 30</td>\n",
- " <td> 0.31</td>\n",
- " <td> 50</td>\n",
- " <td> 50</td>\n",
+ " <th>81</th>\n",
+ " <td>0.05</td>\n",
+ " <td>4</td>\n",
+ " <td>0.030303</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
- " BurnedOut Run density height width\n",
- "0 0.933628 63 0.64 50 50\n",
- "1 1.000000 86 0.87 50 50\n",
- "2 0.793289 58 0.59 50 50\n",
- "3 1.000000 81 0.82 50 50\n",
- "4 0.052219 30 0.31 50 50"
+ " density Run BurnedOut\n",
+ "6 0.01 0 0.000000\n",
+ "98 0.02 1 0.000000\n",
+ "73 0.03 2 0.030303\n",
+ "75 0.04 3 0.021053\n",
+ "81 0.05 4 0.030303"
]
},
"execution_count": 14,
@@ -517,9 +489,7 @@
{
"cell_type": "code",
"execution_count": 15,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"data": {
@@ -533,9 +503,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEACAYAAAC08h1NAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGvxJREFUeJzt3X+MXWWdx/H3dwqzGbNhsY6pK0hAra52kQxErOLuXCMz\nd2STYtvNIorbBaWsWUDNKAWbwCQrS1iZ2IAJ0Iowu0S7G6HruGFnGFlutSyytkKppUW6SsIPbZyi\nxnUnaZv57h/33Llnzv0x595zf809n1dy0/vjOec8czrzued+n+eca+6OiIikR0+7OyAiIq2l4BcR\nSRkFv4hIyij4RURSRsEvIpIyCn4RkZRJHPxm9g0zO2pmByq8/gkz229mz5rZE2b2nqTbFBGR+jXi\niP9+YKTK6z8D/tzd3wP8PbC9AdsUEZE6JQ5+d/8B8Osqrz/p7r8NHj4FnJl0myIiUr9W1/g/BTzS\n4m2KiEjIKa3akJl9CLgKuKhV2xQRkVItCf5gQHcHMOLuJWUhM9MFg0RE6uDuVusyTS/1mNlZwMPA\nFe5+pFI7d9fNnVtuuaXtfeiUm/aF9oX2RfVbvRIf8ZvZt4BBoN/MXgJuAU4Nwvxe4Gbg9cDdZgZw\nwt0vTLpdERGpT+Lgd/fLl3j908Cnk25HREQaQ2fudphMJtPuLnQM7Ysi7Ysi7YvkLEmdqGGdMPNO\n6IeIyHJiZngnDu6KiEhnUfCLiKSMgl9EJGUU/CIiKaPgFxFJGQW/iEjKKPhFRFJGwS8ikjIKfhGR\nlFHwi4ikjIJfRCRlFPwiIimj4BcRSRkFv4hIyij4RURSRsEvIpIyCn4RkZRR8IuIpIyCX0QkZRT8\nIiIpo+AXEUmZRMFvZt8ws6NmdqBKmzvN7AUz229mA0m2JyIiyZ2ScPn7gbuAfyr3opldArzd3Veb\n2fuAu4G1CbcpIrLI9PQ04+PbARgd3Uw2m23aOsLtBgfPZ/fuH5fcHx3dDFC2XbXX6mlXD3P3uhcG\nMLOzge+6+7llXrsHeNzd/yV4fBgYdPejkXaetB8i0lkaEcbV1gf5UJydPcbBg/s5fnwbAL29n2PN\nmvPo739D7PAcHDyfW2+9i7m520vWUa0dXA9cHdzfAdwZLP9F4ETQpwOxXyuu71x6ez8HnMrx41+p\n0m4cd7da92Wzg/+7wG3u/l/B4+8BW9x9X6Sdgl9kmSp3BDw7e5SDB38ahBb09W1h69brKobuUke5\n5cK9GIoAXwAeDO5fAdxRoV35oO7p+Tzz81cFy02H1nGgSjuACWAyuL8O2BR6/h7gSWBjja9NAg8B\n7wf+NnitUruH6wr+pKWeOKKdKpvwY2NjC/czmQyZTKZ5PRKRmlQ6ep+enmb9+k0LR8AzM4VgfRX4\nCoWgmpuDm28eZX5+HDjAzMw/UgjT4jLnsnv3xwgf5Ybb5cP9TUCW48fvoRiKBduDf+9YeL58u0JQ\n37nw/Pw85MO4sJ7COjZWadcOueD2DPB83WtpdvC/Arwl9PjM4LkS4eAXkc4RDffduz+2UAaZnT0W\nPB8N1jeXrGd+fjXlwrS4zB2RoC7XbjtQqWT0ah0/XVFPzwvMz08suZ5iO1j8CeL6hTbFcs4EcE7s\n14rrm6C39zDwRY4fJ9TuzuD+V4N2h+v4SZsf/JPAtcBOM1sL/CZa3xeRzjY+vj0U7tMcP34KTz99\nJQA9PaMVltpMvlxC0K5QIknqVUpDMR+ma9a8A4CDB8PPL25XKajzpajPs3v3JLOzK0LrOKdiO4DB\nwRtCJasbFp4fHf1nIFyyivdacX0/Z3R055LtZmZq34OQsMZvZt8CBoF+4ChwC3AqgLvfG7T5GjAC\n/B640t1LhqJV4xfpXMPDG5mZKdSXo7XmL9DT8w3m578aPF48OBkeIC0OitY3oFkI9/7+VSVjAdHy\nU7lBYKg+thAefK40c6cRg9SNZGbtGdxtBAW/SPMknV2zuNQTrZlPMDCwg/7+VUB9YVrLNMhOCt1O\noOAXkRLR+nxf3xZ27ZqoK/zzUydLZ+vUsz5pDAW/iJRYXKYBmGBoaJJHH30IqK+k0ej5+VK/eoO/\nFdM5RaQDVZ6KeS579myqeCSfzWYV9sucgl+kC4VLM729xVktfX1bGB3NT0VcPFunID+tcm4u/7oC\nvjsp+EW6QLRkE738wMDA/cElDFSPFwW/yLIXLdk89lhhznzh7FXo7y/W9QtGRzezZ88m5uYKzxRP\nHgp/MpDuo+AXWeaiJZu4lxXIZrPs2jUROUGocPKQPhl0MwW/SBcKX1ag2tF7dKB269aWdE/aTMEv\nssxFSzbRywro6F2iNI9fpAtobn066QQuEVmS3iC6S73Bry9bF1lGpqenGR7eyPDwRqanp2tedv36\nTczMrGNmZh3r12+quI4k25FlwN3bfst3Q0SqmZqa8r6+VQ4PODzgfX2rfGpqKvbyQ0MbgmU9uD3g\nQ0MbGr4daZ0gO2vOXA3uiiwT0WmbzTq7tlXbkfZR8IukRLnZPzpJK50U/CLLRNLgjp6wVWmap94g\nup9m9YgsI62alaPZP8uDpnOKpIzCWRT8IinSqG/WkuVNwS+SIkt9s5akg07gEhGRWDSrR2QZ0swb\nSUKlHpEOUM9ArQZ3pW01fjMbAbYBK4Cvu/vtkdf7gQeBN5H/hHGHuz8QaaPgl9TSQK3Uqy3Bb2Yr\ngOeBi4FXgB8Bl7v7oVCbMeAP3P2m4E3geWCVu58MtVHwS2ppoFbq1a7B3QuBI+7+orufAHYCl0ba\n/AI4Lbh/GnAsHPoiUmrfvv26MqY0TdLgPwN4KfT45eC5sB3AGjN7FdgPfDbhNkW6yujoZvr6tgAT\nwe16Xnvto0teOlmkXkln9cSpz3wJeMbdM2b2NmDGzM5z99+FG42NjS3cz2QyZDKZhF0TWR7C19DZ\nt28/r712NXAHoCtjymK5XI5cLpd4PUlr/GuBMXcfCR7fBMyHB3jN7BHgVnd/Inj8GLDF3feG2qjG\nL4Lq/VKbemv8SY/49wKrzexs4FXgMuDySJvD5Ad/nzCzVcA7gZ8l3K5IV9L8fGmFRkzn/AjF6Zz3\nufttZnYNgLvfG8zkuR84i/yYwm3u/s3IOnTELxLQ/HyJS9fqERFJGV2rR0REYlHwi4ikjIJfRCRl\nFPwiIimj4BcRSRkFv4hIyij4RURSRsEvIpIyCn4RkZRR8IuIpIyCX0QkZRT8IiIpo+AXEUkZBb+I\nSMoo+EVEUkbBLyKSMgp+EZGUUfCLiKSMgl9EJGUU/CIiKaPgFxFJGQW/iEjKKPhFRFImcfCb2YiZ\nHTazF8xsS4U2GTN72sx+Yma5pNsUEZH6mbvXv7DZCuB54GLgFeBHwOXufijU5nTgCSDr7i+bWb+7\nz0bW40n6ISKSRmaGu1utyyU94r8QOOLuL7r7CWAncGmkzceBh9z9ZYBo6IuISGslDf4zgJdCj18O\nngtbDaw0s8fNbK+ZfTLhNkVEJIFTE
i4fpz5zKnA+8GHgdcCTZvZDd38h3GhsbGzhfiaTIZPJJOya\niEh3yeVy5HK5xOtJWuNfC4y5+0jw+CZg3t1vD7XZAvS5+1jw+OvAlLt/O9RGNX4RkRq1q8a/F1ht\nZmebWS9wGTAZafMd4INmtsLMXge8D3gu4XZFRKROiUo97n7SzK4FpoEVwH3ufsjMrglev9fdD5vZ\nFPAsMA/scHcFv4hImyQq9TSsEyr1iIjUrF2lHhERWWYU/CIiKaPgFxFJGQW/iEjKKPhFRFJGwS8i\nkjIKfhGRlFHwi7TI9PQ0w8MbGR7eyPT0dLu7IymmE7hEWmB6epr16zcxN5e/jFVf3xZ27Zogm822\nuWeynOkELpEONj6+PQj9TUD+DWB8fPuSy+lTgjRD0ssyi0iTRD8l7NmzSZ8SpCEU/CItMDq6mT17\nNjE3l3/c17eF0dGJqsss/pQAc3P55xT8kpRKPSItkM1m2bVrgqGhSYaGJtm69TrGx7erhCNtocFd\nkRaLO9CrAWFZSr2Duwp+kRYbHt7IzMw6CiUcyH8SePTRh0raTk9PLwwCj45uVujLIvUGv2r8Ih0s\nm80q7KXhFPwiLVbPQK9II6nUI9IGKuFII6jGLyKSMjpzV0REYlHwi4ikjIJfRCRlFPwiIimTOPjN\nbMTMDpvZC2a2pUq795rZSTPbkHSbIiJSv0TBb2YrgK8BI8C7gcvN7F0V2t0OTAE1j0CLiEjjJD3i\nvxA44u4vuvsJYCdwaZl21wHfBn6VcHsiIpJQ0uA/A3gp9Pjl4LkFZnYG+TeDu4OnNGFfRKSNkl6y\nIU6IbwNudHc3M6NCqWdsbGzhfiaTIZPJJOyaiEh3yeVy5HK5xOtJdOauma0Fxtx9JHh8EzDv7reH\n2vyMYtj3A/8HXO3uk6E2OnNXRKRG7Tpzdy+w2szONrNe4DJgMtzA3d/q7ue4+znk6/yfCYe+yHKg\n776VbpKo1OPuJ83sWmAaWAHc5+6HzOya4PV7G9BHkbbSd99Kt9FF2kSWsNQXp+hKm9Iu+iIWkTbQ\npwFZjhT8Ikuo9sUp4+Pbg9DPfxqYm8s/p+CXTqZr9YgsIZvNsmtXvrwzNDS55BH9vn37NQgsHU01\nfpEEoqUeuB64GjiXvr4tKvtIU+kbuETapDC4u2/ffl577aPAHcEriweBRRpN38Al0ibZbJZHH32I\nCy44Dzi33d0RWZIGd0UapNogsEgnUalHpIE0p19aSTV+EZGUUY1fRERiUfCLiKSMgl9EJGUU/CIi\nKaPgFxFJGQW/SIi+cEXSQNM5RQLR6+7oWjvS6TSPXyShpb5wRaTTaB6/iIjEouAXCYyObqavbwsw\nAUwE19rZXLatxgJkOVOpRyQkzrV2NBYgnUI1fpEW0ViAdArV+EVEJBZdj1+kRrruvix3iUs9ZjYC\nbANWAF9399sjr38CuAEw4HfAZ9z92UgblXpkWdF196UTtKXGb2YrgOeBi4FXgB8Bl7v7oVCb9wPP\nuftvgzeJMXdfG1mPgl9EpEbtqvFfCBxx9xfd/QSwE7g03MDdn3T33wYPnwLOTLhNERFJIGnwnwG8\nFHr8cvBcJZ8CHkm4TZFlQ/P9pRMlHdyNXZ8xsw8BVwEXlXt9bGxs4X4mkyGTySTsmkgySev40fn+\ne/Zs0nx/SSSXy5HL5RKvJ2mNfy35mv1I8PgmYL7MAO97gIeBEXc/UmY9qvFLR2nESVqa7y/N1q4a\n/15gtZmdbWa9wGXAZKRjZ5EP/SvKhb5IJxof3x6E/iYg/wZQOPoXWe4SlXrc/aSZXQtMk5/OeZ+7\nHzKza4LX7wVuBl4P3G1mACfc/cJk3RbpfJrvL51Kl2wQKaNR1+PRfH9pJl2rR6TBFNrS6RT8IjEp\n0KVbKPhFYtAllaWb1Bv8ukibdI1KR/Lh52dnj4Vm68DcXH4Gj4Jf0kTBL12h0slSwKLne3pG29ZH\nkU6h4JeOF6cmv3jeffFIPn+/+Pz8/AF6ej7P/Hx+OU2xlDRS8EtHix7J7979SdaseQf9/avqHJg9\nl/POezf9/fnzDAcHr2N8fDvj49s10Cvp4e5tv+W7Id1oamrKh4Y2+NDQBp+amqq53dDQBocHHDy4\nPeCw1uEB7+tbtdB2amrK+/pWBa8XX6v0fLVlRJaLIDtrz9x6Fmr0TcHfneIGa7V25YN/w8L9oaEN\ni9ZT7s2jljeV8PpEOl29wa9SjzRNpbp7tJxSrV30sgfwBeDBstvLZrOLZvIMD28E8uMCujCaSJG+\nbF3aphDO+/btr9gmm82ya1f+qpYDA/fT23sS+CUwEQzMbi673vXrNzEzs46ZmXWsX7+p7LXwR0c3\n09e3BZiouj6RrlPPx4RG31CppytFSzi9vW/0gYGLfGhog3/5y18OvTbqcFqsWnucMYNaSjhxxyBE\nOhGq8Us1jQi4etZRWGZgYNB7e09fCPeenjdEwnnUV658W0MCWLV7SQsFfxdLGtqNmL2SdB2lYby2\naeGs2TqSFgr+LtWIEGvEEXDSdZQuP+o9Pa9vWjirhCNpUG/wp2ZWTydekTHcp8HB89m9+8fA4v4t\nNTOmET9XI75bNrx8od/h9ZV+KcmDbN06yu7dk0G7xl4oLTzDR0Qi6nm3aPSN4Ii/WUdpnfjRf3Gf\nKg9uVjvSbsQ8+dIB2NN9YGBwYQC28P+xeDC28glSvb2ne2/vGytuq5H/vzqql7RjuZd66g3nRszy\naGWAFLa1cuXbgsD34ISkxf0rzH6JDorW8qYQ/pnincQ05dC/sK38m9HownbDbwSV+9C82n10P3ba\nm7lIq9Ub/B1T6ol7sk9YpSsy1vIRv/RaMB9jzZrz6O9/Q01ljziXBB4cPJ9bb70r2NY68icjDZVZ\n2wH273+O+fmrAejt/SIDAzuC69Ms/fNV2i+Fk5jCJzfNzh4LLbkduIPC/0HeJHAHc3Owe/dkS06E\nSnJRNpV3RGKo592i0TegrsHDuEfyAwMXVSw/VDvireVTR5wySOkUxsJ1ZxaXesq1K7cvKm23lvLQ\n4tJM6dH6UpdHiH4iqVbqiSPukbymbIp0wRF/6eBfssvlRo96e3s/x8DA/cGRfKWj5sVHvHGPIuNf\nEviekmVXrvwVF1zwcwYHb1gY6Jyd/VOefnrpn7FwVmvxE0X+SpP5M2HXxerr8eMEnyYmmZ1dwcGD\nX+T48ULr64GrKZ7Vmv//KN234U8kOxe2A7UP2sY9km/074tImnRM8FcKsWqXyy33xz84eN3CZQDm\n5q4gHHD9/ZOLyh3j49uZnT1Gb+/ngrB7tWQbs7PHFl3zpdK3OsVzUcm14L/5zWIwbt3KwrrzwVps\nFw7daBkkm81Gwvgc8qFdunw5/f2rSvYLELwZ/Rj4+aIAL/fmEd63QNNLLtHfl0bPChLpavV8TGj0\njcg8/loG7sKDltGZJ/myzVSMckf+UgLRklBv7xsjZYxK7U4vWa5cu0oDpEv9XHEuI1xunny5M2
E7\n5byAaj+3Bm1F4qFds3qAEeAw8AKwpUKbO4PX9wMDZV5f9MPUGyxxr90edzbMwMBFFdZXWguvNAsn\nPD2ymZciaOX1aZodzpqmKRJPvcGfqNRjZiuArwEXA68APzKzSXc/FGpzCfB2d19tZu8D7gbW1ral\nA+zbt5/h4Y0VTxCqJF9Dn4xdCgif+FMo8Sz25rLLFcolw8MbOX58G9XKIM1QS8076clNzS6z6OQr\nkSar592icAPeD0yFHt8I3Bhpcw9wWejxYWBVpM2id7FqJzdFyy/1fKNSve2KpaPKs3/aWQbRkbJI\nutCOUg/wl8CO0OMrgLsibb4LfCD0+HvABZE2JT/Q4hOdypVbygdr0q/6q9QubglHZRARaZV6gz/p\nrB6P2c6WWm5sbGzhfiaTWfi4Pzy8kZmZ+B2KWyaop93iGTU7yy6vMoiINEsulyOXyyVej+XfNOpc\n2GwtMObuI8Hjm4B5d7891OYeIOfuO4PHh4FBdz8aauOV+lFuzjicCOro+Vp2rWfrioh0AzPD3aMH\n1ksvlzD4TwGeBz5MfhL8fwOXe+ng7rXufknwRrHN3ddG1lMx+CHe1R9FRNKmLcEfbPgjwDZgBXCf\nu99mZtcAuPu9QZuvkZ/2+XvgSnf/cWQdVYNfRERKtS34G0HBLyJSu3qDv6cZnRERkc6l4BcRSRkF\nv4hIyij4RURSRsEvIpIyCn4RkZRR8IuIpIyCX0QkZRT8IiIpo+AXEUkZBb+ISMoo+EVEUkbBLyKS\nMgp+EZGUUfCLiKSMgl9EJGUU/CIiKaPgFxFJGQW/iEjKKPhFRFJGwS8ikjIKfhGRlKk7+M1spZnN\nmNlPzexRMzu9TJu3mNnjZnbQzH5iZtcn666IiCSV5Ij/RmDG3d8BPBY8jjoBfN7d1wBrgb8zs3cl\n2GbXy+Vy7e5Cx9C+KNK+KNK+SC5J8K8DJoL7E8BHow3c/Zfu/kxw/3+BQ8CbE2yz6+mXukj7okj7\nokj7Irkkwb/K3Y8G948Cq6o1NrOzgQHgqQTbFBGRhE6p9qKZzQBvKvPS1vADd3cz8yrr+UPg28Bn\ngyN/ERFpE3OvmNfVFzQ7DGTc/Zdm9sfA4+7+J2XanQr8O/Af7r6twrrq64SISMq5u9W6TNUj/iVM\nApuA24N//y3awMwMuA94rlLoQ30dFxGR+iQ54l8J/CtwFvAi8Ffu/hszezOww93/wsw+CHwfeBYo\nbOgmd59K3HMREalL3cEvIiLLU0vP3DWzETM7bGYvmNmWCm3uDF7fb2YDrexfKy21L8zsE8E+eNbM\nnjCz97Sjn60Q5/ciaPdeMztpZhta2b9Wivk3kjGzp4OTInMt7mLLxPgb6TezKTN7JtgXf9OGbjad\nmX3DzI6a2YEqbWrLTXdvyQ1YARwBzgZOBZ4B3hVpcwnwSHD/fcAPW9W/Vt5i7ov3A38U3B9J874I\ntftP8hMFNra73238vTgdOAicGTzub3e/27gvxoDbCvsBOAac0u6+N2Ff/Bn5qfAHKrxec2628oj/\nQuCIu7/o7ieAncClkTYLJ4W5+1PA6WZW9fyAZWrJfeHuT7r7b4OHTwFntriPrRLn9wLgOvJTgn/V\nys61WJx98XHgIXd/GcDdZ1vcx1aJsy9+AZwW3D8NOObuJ1vYx5Zw9x8Av67SpObcbGXwnwG8FHr8\ncvDcUm26MfDi7IuwTwGPNLVH7bPkvjCzM8j/0d8dPNWtA1Nxfi9WAyuDa2DtNbNPtqx3rRVnX+wA\n1pjZq8B+4LMt6lunqTk3k0znrFXcP9bo1M5u/COP/TOZ2YeAq4CLmtedtoqzL7YBN7q7B1OEu3X6\nb5x9cSpwPvBh4HXAk2b2Q3d/oak9a704++JLwDPunjGztwEzZnaeu/+uyX3rRDXlZiuD/xXgLaHH\nbyH/zlStzZnBc90mzr4gGNDdAYy4e7WPestZnH1xAbAzn/n0Ax8xsxPuPtmaLrZMnH3xEjDr7nPA\nnJl9HzgP6Lbgj7MvPgDcCuDu/2NmPwfeCextSQ87R8252cpSz15gtZmdbWa9wGXkTwILmwT+GsDM\n1gK/8eL1gLrJkvvCzM4CHgaucPcjbehjqyy5L9z9re5+jrufQ77O/5kuDH2I9zfyHeCDZrbCzF5H\nfjDvuRb3sxXi7IvDwMUAQU37ncDPWtrLzlBzbrbsiN/dT5rZtcA0+RH7+9z9kJldE7x+r7s/YmaX\nmNkR4PfAla3qXyvF2RfAzcDrgbuDI90T7n5hu/rcLDH3RSrE/Bs5bGZT5E+KnCd/smTXBX/M34t/\nAO43s/3kD2JvcPfX2tbpJjGzbwGDQL+ZvQTcQr7kV3du6gQuEZGU0VcvioikjIJfRCRlFPwiIimj\n4BcRSRkFv4hIyij4RURSRsEvIpIyCn4RkZT5fz1XL4PwuEedAAAAAElFTkSuQmCC\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD8CAYAAAB0IB+mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGIlJREFUeJzt3X+MXeV95/H31+MBhg1h2NpV12OoWdVxSqGNkxHNytIW\nknTtEMn20myBCLXpZoPaiq5CkSWjVilL/8Aba8W2KtvE20ZpWjVAUuR6U1f+Y03UCoWIQYYQSLzr\nQgozToWbZdhVPcDYfPePO9e+vr4/ztyfc+e8XxLi3nPPPffxkf25z/0+z3lOZCaSpNVvzbAbIEka\nDANfkkrCwJekkjDwJakkDHxJKgkDX5JKwsCXpJIw8CWpJAx8SSqJtcP64HXr1uWmTZuG9fGSNJKe\neeaZf8zM9Z28d2iBv2nTJmZmZob18ZI0kiLi7zt9ryUdSSoJA1+SSsLAl6SSMPAlqSQMfEkqCQNf\nkkrCwJekkmgb+BHxxYh4LSK+0+T1iIjfj4gTEfHtiHh/75spSepWkQuvvgT8AfDlJq9/FNi89N/P\nAn+49H9JWraDx+bYf+Q4J+cX2DA5wZ7tW9i9darlfldOjBMB86cXL3i8YXKCm9+7nie+d2rk9mv2\n2iU/9hMf6PTcRpGbmEfEJuDrmXl9g9e+AHwjM7+y9Pw4cFNm/qDVMaenp9MrbaXR0G0I17+nVVj/\n09tnWDx7PpcCSGCyzX5l8YM/+Qxv/eB/Ryfv7cXSClPAqzXPZ5e2tQx8SaPh4LE57nv8eRYWzwIw\nN7/APY8+y2cefbZlCM8vLJ47xtz8Avc9/vy557XHq92v9nFVNnit0X5qrxeB3+ibpuHXbkTcBdwF\ncM011/TgoyX12/4jx8+Fc1UnIbyweJZ7H3uOswWqCuqPXszSmQWurnm+ETjZaMfMPJCZ05k5vX59\nR4u9SRqwk/MLPTuWYT9cvQj8Q8AvLc3W+SDwRrv6vaTRsWFyYthNUI+0LelExFeAm4B1ETEL/A4w\nDpCZnwcOA7cAJ4DTwK/0q7GSutPp4Ov4WAxsgHR8TfCuy9by+unFcwO2rfarn/my0mffdDtLp5ve\ndKFZOv3gLB1psOoHX+HC0Kx+AQBN9+s0hNdENC3nTHYwm6fVl9VqFxHPZOZ0R+818KVy2LbvKHNt\n6vGtwnxqcoIn936ooxBu9GUzMT7Gg7feUMrQ7kY3gT+0O15JGoxqQLcLe2ge9nB+8Hb31qllh3R1\n/yLlJPWPgS+tYo161p3qdvC2ky8K9ZaBL61Cy+nVFzExPnauvq/RZeBLq0zRXv1kweUJpiy/rBoG\nvjSimk2xbHRlbL36Adi5+YWLBmwdVF19nKUjjaBGvfhWM2xqNQvyonP0NVzO0pFKptX6Nq20Ks84\nqLr6GfjSCFru+jaWZwTe4lAaSUWnSAaVXr1hL7CHL42kPdu3tJ2JUx2YlaoMfGkE1V652myGjfPm\nVc/Al0ZU7SCrM2xUhIEvrQLOsFERDtpKUkkY+JJUEga+JJWEgS9JJWHgS1JJGPiSVBIGviSVhIEv\nSSVh4EtSSRj4klQSBr4klYSBL0klYeBLUkkY+JJUEga+JJWE6+FLI8QbnagbhXr4EbEjIo5HxImI\n2Nvg9Wsi4omIOBYR346IW3rfVKncDh6b477Hn2dufoEE5uYXuO/x5zl4bG7YTdOIaBv4ETEGPAx8\nFLgOuCMirqvb7beBxzJzK3A78N963VCp7PYfOX7RTcsXFs+y/8jxIbVIo6ZID/9G4ERmvpSZbwOP\nALvq9kng3UuPrwRO9q6JkgBOzi8sa7tUr0jgTwGv1jyfXdpW637gzoiYBQ4Dv9HoQBFxV0TMRMTM\nqVOnOmiuVF4bJieWtV2qVyTwo8G2rHt+B/ClzNwI3AL8aURcdOzMPJCZ05k5vX79+uW3ViqxPdu3\nMDE+dsG2ifEx9mzfMqQWadQUmaUzC1xd83wjF5dsPgXsAMjMb0bEZcA64LVeNFIS52bjOEtHnSoS\n+E8DmyPiWmCOyqDsJ+r2eQX4MPCliPhJ4DLAmo3UY7u3Thnw6ljbkk5mngHuBo4A36UyG+eFiHgg\nInYu7XYv8OmIeA74CvDJzKwv+0iShqjQhVeZeZjKYGztts/WPH4R2NbbpkmSesmlFSSpJAx8SSoJ\nA1+SSsLAl6SScLVMaYVxRUz1i4EvDUGzUK+uiFldJK26IiZg6KtrBr40YK1CvdWKmAa+umUNXxqw\nVqHuipjqJwNfGrBWoe6KmOonA18asFah3mhFzKBS9tm276h3t1JXDHxpwBqF+via4PTbZ7jn0We5\ndO0arrp8HKiEfXVRKm9pqG4Z+NKA7d46xYO33sDU5AQBTE6MQ8DrpxdJYH5hkTcX3+Gqy8cvuvGE\ntzRUN5ylIw1B7TLH2/YdZX5h8YLXFxbPXjSwW+UArjplD18asuUGuAO46pSBLw1ZswCfnBj3lobq\nKQNfGrJm96q9f+dPXVDrn5qc4MFbb/ACLHXMGr40ZO3uVWvAq1cMfGkF8F61GgRLOpJUEga+JJWE\ngS9JJWHgS1JJGPiSVBIGviSVhIEvSSVh4EtSSRj4klQSBr4klYSBL0klUSjwI2JHRByPiBMRsbfJ\nPr8YES9GxAsR8ee9baYkqVttF0+LiDHgYeDngVng6Yg4lJkv1uyzGbgP2JaZr0fEj/arwZKkzhTp\n4d8InMjMlzLzbeARYFfdPp8GHs7M1wEy87XeNlOS1K0igT8FvFrzfHZpW633AO+JiCcj4qmI2NGr\nBkqSeqPIevjRYFs2OM5m4CZgI/C3EXF9Zs5fcKCIu4C7AK655pplN1aS1LkiPfxZ4Oqa5xuBkw32\n+cvMXMzMl4HjVL4ALpCZBzJzOjOn169f32mbJUkdKBL4TwObI+LaiLgEuB04VLfPQeBmgIhYR6XE\n81IvGypJ6k7bwM/MM8DdwBHgu8BjmflCRDwQETuXdjsC/DAiXgSeAPZk5g/71WhJ0vJFZn05fjCm\np6dzZmZmKJ8tSaMqIp7JzOlO3uuVtpJUEga+JJWEgS9JJWHgS1JJGPiSVBJFrrSV1AMHj82x/8hx\nTs4vsGFygj3bt7B7a/0qJVL/GPjSABw8Nsd9jz/PwuJZAObmF7jv8ecBDH0NjCUdaQD2Hzl+Luyr\nFhbPsv/I8SG1SGVk4EsDcHJ+YVnbpX4w8KUB2DA5saztUj8Y+NIA7Nm+hYnxsQu2TYyPsWf7liG1\nSGXkoK00ANWBWWfpaJgMfGlAdm+dMuA1VJZ0JKkkDHxJKgkDX5JKwsCXpJIw8CWpJAx8SSoJA1+S\nSsLAl6SSMPAlqSQMfEkqCQNfkkrCwJekkjDwJakkDHxJKgkDX5JKwsCXpJIw8CWpJAoFfkTsiIjj\nEXEiIva22O/jEZERMd27JkqSeqFt4EfEGPAw8FHgOuCOiLiuwX5XAP8R+FavGylJ6l6RHv6NwInM\nfCkz3wYeAXY12O93gc8Bb/awfZKkH
ikS+FPAqzXPZ5e2nRMRW4GrM/PrrQ4UEXdFxExEzJw6dWrZ\njZUkda5I4EeDbXnuxYg1wEPAve0OlJkHMnM6M6fXr19fvJWSpK4VCfxZ4Oqa5xuBkzXPrwCuB74R\nEd8HPggccuBWklaWIoH/NLA5Iq6NiEuA24FD1Rcz843MXJeZmzJzE/AUsDMzZ/rSYklSR9oGfmae\nAe4GjgDfBR7LzBci4oGI2NnvBkqSemNtkZ0y8zBwuG7bZ5vse1P3zZIk9ZpX2kpSSRj4klQSBr4k\nlYSBL0klYeBLUkkY+JJUEga+JJWEgS9JJWHgS1JJGPiSVBIGviSVRKG1dCR15uCxOfYfOc7J+QU2\nTE6wZ/sWdm+dav9GqQ8MfKlPDh6b477Hn2dh8SwAc/ML3Pf48wCGvobCko7UJ/uPHD8X9lULi2fZ\nf+T4kFqksjPwpT45Ob+wrO1Svxn4Up9smJxY1nap3wx8qU/2bN/CxPjYBdsmxsfYs33LkFqksnPQ\nVuqT6sCss3S0Uhj4Uh/t3jplwGvFsKQjSSVh4EtSSRj4klQS1vC16rm8gVRh4GtVc3kD6TxLOlrV\nXN5AOs8evla1QS9vYPlIK5k9fK1qg1zeoFo+mptfIDlfPjp4bK7nnyV1wsDXqrac5Q0OHptj276j\nXLv3r9i27+iyg9rykVY6Szpa1Youb9CLwV1Xx9RKVyjwI2IH8HvAGPBHmbmv7vXfBP4DcAY4Bfz7\nzPz7HrdV6kiR5Q1a9c6LBv6GyQnmGoS7q2NqpWhb0omIMeBh4KPAdcAdEXFd3W7HgOnM/Gnga8Dn\net1QqZ960Tt3dUytdEV6+DcCJzLzJYCIeATYBbxY3SEzn6jZ/yngzl42Uuq3bnrntTNzrpwY57Lx\nNcyfXnSWjlacIoO2U8CrNc9nl7Y18yngr7tplDRojXrn42uC02+faTmIWz8zZ35hkTcX3+Gh297H\nk3s/ZNhrRSkS+NFgWzbcMeJOYBrY3+T1uyJiJiJmTp06VbyVUp/t3jrFg7fewNTkBAFMToxDwOun\nF89Nsbzn0WfZVBf+zszRKClS0pkFrq55vhE4Wb9TRHwE+C3g5zLzrUYHyswDwAGA6enphl8a0rDU\nDu5u23eU+YXFC16v/oWtncHjzByNkiKB/zSwOSKuBeaA24FP1O4QEVuBLwA7MvO1nrdS6qEiV8O2\nC+xqL96ZORolbUs6mXkGuBs4AnwXeCwzX4iIByJi59Ju+4F3AV+NiGcj4lDfWix1oejVsEUC++T8\ngjNzNFIKzcPPzMPA4bptn615/JEet0vqi6Lz7fds33LBhViNbJic8L61GileaatSKVpzrw3yufkF\nggtnKtTO4DHkNSoMfJXKcmrutYO49XPt/+ntM7x+ujKo6xr7GhUunqZS6bTmvnvrFE/u/RAv7/sY\n/+zStSyevXCSmVMxNQrs4WtkdbL2fC9q7k7F1Kgy8DWSulndsshiaq04FVOjypKORtIwr3B1KqZG\nlT18jaRhllWciqlRZeBraLq5/+uwyyrdloWkYbCko6Ho9v6vvSqrdHtbQ2mU2MNXYd30yOt1e4ep\nXpRVenFbQ2mUGPgqpNfh2IsafKuySpEvp17c1lAaJZZ0VEivZ8U0q7VvmJzousxStFzkfHqVjYGv\nQnodjs1q8De/d33Htf3qF8VnHn220JdTqy8daTUy8FVIr8Ox0R2mLhtfw5899UpHvyRqe/XN1H85\nOZ9eZWPgq5BG4RhUeuCdzm6prk/z0G3v460z75xbjKyRdr8kGpWc6tV/OdV/6UxNTvDgrTdYv9eq\n5aCtCmm1XHD9AO5yZ/MUCes1ES2XIm73hdCs5+58epWJga/CquG4bd/Ri0ontWWX5c7mKTIOcDaz\n5fGaXYgFlZ67V8JKBr460GoAt91snkY9/1ZhPRZxLuzrj9fuDlUT42OWaKQa1vC1bK0GcJt9GVR7\n5o1m3zQbPP2vt72Pd+rCvqrRHaqsx0utRTb5B9Vv09PTOTMzM5TPHgW9vKq1/nhXTowTAfOnF3ty\nhSqc701Xa/xFTU1O8OTeDzX98zYqH9W+11KNyiYinsnM6Y7ea+CvPK0CtZNwa3S8WrXHLvpF0+wL\npHr7v/o7QjUTwMv7PtaTtktl0E3gW9JZgTq9qrXZFartZsFUj72cBc0aTalMYH5hERKuuny80J+1\n3Tz+2lJNq7ZLas9B2xVoOVe1VnvajaZK3vPos3zm0WcLf2Yna8s0es/iO8nll6xlfulLoJmiFzlV\nZwddu/evGh7PpRCkYgx8itfLW+3X7LX67Te/dz1PfO9Uw1JI9fGaBjNT4OLecH25o/4dyynWtRtw\n3bbv6LLmv1f/vL2svw97DXxp1K3YwO8kXJdzjNp9iswbb7UfXDz3vNq7ru91/9lTr5x7z/zCYsPH\njcK+UW+4yAVLRVSP3WrAdbnz36vnu5djEc2O51IIUjErMvCXG67LDeja/dqVMWpLJvVq68f1x8i6\n/3diLIJ3Mpv+Yujk2AEtZ+m0GiBdzvz32mP2araRtxaUurMiZ+m0morX6EIcOD+9r90x6vdrVhcO\n4KHb3tcyAKv7QXfB3urYL+/7WNM6/XLV/9mrGs24abauTaNZNb2eQiqpuW5m6azIHn6rQbhGYQ8X\n15mLDnw2K0kkcO9jzzX9vNr3Vz+/16prw7eq09erfiHUfzE0K33UH39+YZGJ8TGuuny8Yeg3qpe7\nHo00GlZk4Lca7Gtlbn6BPV99jv/0P15oGoxJpfdfHTxt1WtuF/a1Idrul8By1dbVixw3oOVgcbNe\nd7OS1qVr1zAxPma9XFpFVmRJp93FNv2w3FLJZF0dvNUXSPX5VMFZOrUB3azkVKtZqaaIdiUtSzXS\nytL3kk5E7AB+DxgD/igz99W9finwZeADwA+B2zLz++2O227K4mXja5rWkpvV8jtV9EgT42P8wgem\n+Itn5i4YEP6LZ+aWfbVqEe1+7XTb6241y8ZSjbS6tO3hR8QY8L+AnwdmgaeBOzLzxZp9fh346cz8\n1Yi4Hfi3mXlbq+P+xHU/k+O/8J9b9uIbhWt1eyfrtnSqfrZMs8/tpqfdTKNfO7W/GHqxxo6rTEqj\no989/BuBE5n50tKHPQLsAl6s2WcXcP/S468BfxARkS2+Tf7h/77JujYlm4XFszzxvVPnwr1Rj7lI\n6afor4HJiXHeOvNO2/C7p8nVq/244rPfUxGd6iiVR5HAnwJerXk+C/xss30y80xEvAH8CPCPtTtF\nxF3AXQBj715fqIEn5xealhbqw6rRwl3NfiXUmxgf4/6dP3XB8ZqF36Cv+Ox3acXSjVQORQI/Gmyr\n7y4X2YfMPAAcAHjXxi2FyuZFFteqv+CqUWBP//g/bzpeUB/s7cLPKz4ljaIigT8LXF3zfCNwssk+\nsxGxFrgS+D+tDvpj776M8bppf/U6CdFWvwYsg0gqsyKB/zSwOSKuBeaA24FP1O1zCPhl4JvAx4
Gj\nrer3AJOXj/PbdbX5Vr3ulcYyiKRR0zbwl2rydwNHqEzL/GJmvhARDwAzmXkI+GPgTyPiBJWe/e1F\nPtzQlKTBKTQPPzMPA4frtn225vGbwL/rbdMkSb3kHa8kqSQMfEkqCQNfkkrCwJekkjDwJakkDHxJ\nKgkDX5JKYmg3QImI/wccH8qHrzzrqFtorsQ8F+d5Ls7zXJy3JTOv6OSNw7zF4fFO13RebSJixnNR\n4bk4z3NxnufivIhofKvAAizpSFJJGPiSVBLDDPwDQ/zslcZzcZ7n4jzPxXmei/M6PhdDG7SVJA2W\nJR1JKom+B35E7IiI4xFxIiL2Nnj90oh4dOn1b0XEpn63aVgKnIvfjIgXI+LbEfE/I+LHh9HOQWh3\nLmr2+3hEZESs2hkaRc5FRPzi0t+NFyLizwfdxkEp8G/kmoh4IiKOLf07uWUY7ey3iPhiRLwWEd9p\n8npExO8vnadvR8T7Cx04M/v2H5Ubpvwd8C+BS4DngOvq9vl14PNLj28HHu1nm4b1X8FzcTNw+dLj\nXyvzuVja7wrgb4CngOlht3uIfy82A8eAq5ae/+iw2z3Ec3EA+LWlx9cB3x92u/t0Lv418H7gO01e\nvwX4ayr3E/8g8K0ix+13D/9G4ERmvpSZbwOPALvq9tkF/MnS468BH46IRjdFH3Vtz0VmPpGZp5ee\nPkXl/sGrUZG/FwC/C3wOeHOQjRuwIufi08DDmfk6QGa+NuA2DkqRc5HAu5ceX8nF99deFTLzb2h9\nX/BdwJez4ilgMiL+Rbvj9jvwp4BXa57PLm1ruE9mngHeAH6kz+0ahiLnotanqHyDr0Ztz0VEbAWu\nzsyvD7JhQ1Dk78V7gPdExJMR8VRE7BhY6waryLm4H7gzImap3IXvNwbTtBVnuXkC9P9K20Y99fpp\nQUX2WQ0K/zkj4k5gGvi5vrZoeFqei4hYAzwEfHJQDRqiIn8v1lIp69xE5Vff30bE9Zk53+e2DVqR\nc3EH8KXM/C8R8a+o3Ev7+sx8p//NW1E6ys1+9/Bngatrnm/k4p9g5/aJiLVUfqa1+ikzqoqcCyLi\nI8BvATsz860BtW3Q2p2LK4DrgW9ExPep1CgPrdKB26L/Rv4yMxcz82Uqa1BtHlD7BqnIufgU8BhA\nZn4TuIzKOjtlUyhP6vU78J8GNkfEtRFxCZVB2UN1+xwCfnnp8ceBo7k0KrHKtD0XS2WML1AJ+9Va\np4U25yIz38jMdZm5KTM3URnP2JmZHa8hsoIV+TdykMqAPhGxjkqJ56WBtnIwipyLV4APA0TET1IJ\n/FMDbeXKcAj4paXZOh8E3sjMH7R7U19LOpl5JiLuBo5QGYH/Yma+EBEPADOZeQj4Yyo/y05Q6dnf\n3s82DUvBc7EfeBfw1aVx61cyc+fQGt0nBc9FKRQ8F0eAfxMRLwJngT2Z+cPhtbo/Cp6Le4H/HhH3\nUClhfHI1dhAj4itUSnjrlsYrfgcYB8jMz1MZv7gFOAGcBn6l0HFX4bmSJDXglbaSVBIGviSVhIEv\nSSVh4EtSSRj4klQSBr4klYSBL0klYeBLUkn8f8pQDCMBaWJFAAAAAElFTkSuQmCC\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10ad75c18>"
+ "<matplotlib.figure.Figure at 0x11c183c50>"
]
},
"metadata": {},
@@ -561,15 +531,13 @@
{
"cell_type": "code",
"execution_count": 16,
- "metadata": {
- "collapsed": false
- },
+ "metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
- "100%|██████████| 500/500 [00:37<00:00, 6.89it/s]\n"
+ "500it [00:06, 71.59it/s] \n"
]
},
{
@@ -584,9 +552,9 @@
},
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEACAYAAAC08h1NAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3X18XGWd9/HPNZkGUtpC00B5KJQaEARK77QV6sLeiStJ\nCvdapdlVeXADuoAisNAUSi2rddsuq0sri+4tDypUVNx9ibjhvjUhuCRreSFrbSmlUuWhcoM8vAwB\noVBMQ373H+ecmTNnziSTTDJ5mO/79cqr58xcc+bktP3NNb/rd67LmRkiIlI6EmN9AiIiUlwK/CIi\nJUaBX0SkxCjwi4iUGAV+EZESo8AvIlJiCg78zrlvO+decc7tzPH8Bc65Hc65x51zDzvnTi30PUVE\nZPhGosd/J7B0gOefBf6nmZ0KrANuH4H3FBGRYSo48JvZz4HXBnj+ETP7o7/7KDCn0PcUEZHhK3aO\n/9PAT4r8niIiEpIs1hs55z4IfAo4o1jvKSIi2YoS+P0B3TuApWaWlRZyzmnCIBGRYTAzN9TXjHqq\nxzl3DPAj4EIzezpXOzPTjxlf/OIXx/wcxsuProWuha7FwD/DVXCP3zl3D1ALVDnnnge+CEzxg/lt\nwBeAmcA3nHMA+83stELfV0REhqfgwG9m5w3y/N8Cf1vo+4iIyMjQnbvjTF1d3Vifwriha5Gma5Gm\na1E4V0ieaMROwjkbD+chIjKROOew8Ti4KyIi44sCv4hIiVHgFxEpMQr8IiIlRoFfRKTEKPCLiJQY\nBX4RkRKjwC8iUmIU+EVESowCv4hIiVHgFxEpMQr8IiIlRoFfRKTEKPCLiJQYBX4RkRKjwC8iUmIU\n+EVESowCv4hIiVHgFxEpMQr8IiIlRoFfRKTEFBT4nXPfds694pzbOUCbW5xzTznndjjnagp5PxER\nKVyywNffCXwN+E7ck865c4DjzOx459zpwDeAJQW+p4hITu3t7WzceDsALS2XAqT2a2sX0tW1LfVc\nY2Nj7OvM3qCz83EA5s8/iueeewOAFSsu5qmnnuJ73/spAIceWsZLL/UCUFlp9PQ4AM46y+vjPvjg\ndgCmTfsTe/eWA1BTMxeA7dufA+DAA9/mnXemZm3n2244nJkN+8UAzrljgfvNbH7Mc7cCD5nZv/n7\nu4FaM3sl0s4KPQ8RGd8uuuiiVMAMB9OZM+GZZ14HvIBZV1fHpk13Zj0XDayvvvpqTFDsBfqBUwHw\nkhHvYvY//LPYDlTgJTt6gEoAqqtn8uyz/w+z6f4xEsAtwE68/uqp/usfB94FakL7n/W37/BfA3AV\n8BbwfuA5fzv8XB/wv/3jR193CTAfuByvb35LpN0d/v4twEWYmRvouscys4J+gGOBnTmeux/4s9D+\ng8CimHYmIhNDc3OzJZOHWTJ5mDU3N1tbW5vV1y+3+vrl1tzcbJWV1VZZWW1HHHGEQaVBpVVWVhrM\nMLjLoCW0fZe/3RLanmIwx2BaTLsmf/uAHM+daFAVerzKP070vZpiXl/mby/x/zSDMyLHm2FwcOT4\nZxgsD73G/O1DYo5noccsx+uW+9vh14XbHRbaxmwYcbvQVE8+op9GsV37tWvXprbr6uqoq6sbvTMS\nkSzhVMeRR07n/vu3AF56Y/HixWzceDtPPPEYL73UTdBD3bz5Kr7znX/HbIF/lMeBqcDbpHur0NMT\n9GSbgSZ/u9V/zSXAHuAmvL5iB7AeuMH/szl0luuAHw7wHP5xwo/fENlvBXb45xZ+fIW/3xp67OXI\n8W4FPhPzvrPJdmLM8QrVCewF7sO7ZsMz2oH/98DRof05/mNZwoFfREZOOKBHc9xbt25l06Y76e19\nm3fe6aWvb6P/qsuB9wDTuOGGfySRSNDf/3XgReALpAPfTsy+hRcMAVYCVUA32QH4Vv/PV4D/8p8P\nXnOCv/0o6YC8jqGbE/PYIUM8xqXAhf52RR7tK4B5eGmawNXAp2OOB+lUz+aY1wUfkJvxPkSD54J2\nt+ClmR4Czs3rt4kz2oG/FbgC+IFzbgnwukXy+yIyetrb2zn33Gb27fsysJOOjq8Q9MIfeugC+vr2\n4gWSF4CNeAG3Ha/XvtI/ykr6+w/E++/8B7z8cuBh4Gaye8BxBYO/xQtofyT7Q+Em/7m3Q49dTHZQ\nrPfb9eR47tiYx4MgG+xfgpeIiLZ7K9Tubf/36Iu0ezzmdf3At4A/EXy4OfcO8E3SQ59v430YJKip\nqQZg+/YVABx4YC/vvBPe9sY3ampOGKTdCoaroMDvnLsHqAWqnHPPA18EpgCY2W1m9hPn3DnOuafx\nrurFhbyfiAzNxo23+0E/SLGk0xt9feClNz5DujcOcDvZgfkaYJn/EwS++XjBPGoO8QE4CIyvx7zm\ndf+5N0Ovm+O/5jq8D5J9eAmDW0kkEsybdyjPPBMOhA8BMG1aP3v3tgBw1lmnAfDgg95+ZWWSnp67\nQu2819fUVDNr1qxUu+rqI3jtNYAkc+dWs3PndQBccMFfc/zxx7Npk/dt5MMfPpcXX3wTiH6bWgsQ\nqi76UUYF0UhxbujjujACVT0jQVU9IiMrSO/86lc76On5KF4++Jd4vdggoG/GC/A78Xr5F/r70Tz2\nZv+xR0L7LcDxeD3gKcC/+M+Fq1IuAaYBjurqmTz//Kv09p6IF+RfJPjmUVZ2DaeeehJVVbNpabmU\ne+65J1X9U1d3Ks7NAAYuxSxVzrlhVfUo8ItMMpnpnWCw9Bbgn0jn3iGdj38ytL8ZLw3yJ7JLDIPX\nbca7hafT376cdLnjdhKJg0gkklxwwdncddddGeeVTz295E+BX0QAaGhooqNjGen0Tnh7HulqkHl4\nAXyTvx/urV8BJEkmy5kxw9HT0wt81W+3Avg+0Ahsprr6Zl57zUt3rFhxMWvWrBnl31ACww38xSjn\nFJEx82po+1LgY8BJ/n47sJR0eWO4Vw+wir6+L7N379Ukk0Zf3614pYTv4JU5bqaiYhUXX3xlqve+\nePHiUf1tZGQo8ItMINHpCOJSJC0tl7JlSzP79oFXOnk5Xi37O36LoPTyKhKJB+jv/xpezj16871X\nh97bCzU1d1BV5dWq19Z+gq6uVn/7SjZs+JqfVoItW5q5777NSt2Md8O562ukf9CduyKDamtrs4qK\n2am7RisqZltbW1vqueDu2ba2Nlu/fr1VVlabc1NDd6hm30Hq3Ez/8RMNpvrbS/y7XdtS7errl8ee\nU3199p2nudrKyGMc37krIiMgszQT9u1Llwuec85H6O+fDsCDD95PWdlU+vreh1cz30yuO0jNTsCr\n1mkHzif8bcAbFH6ZiopVtLRsznqtTFwK/CIT3MUXX0p//wEE+Xmzy+nrS5B5N209sJD4m57Aq93f\nRLh2v7JyHYsW7aGlJXfqJjOthD4kJggFfpEJIleQXbq0i8x5Z+Lmk1nr/5k5R04icRf9/ZvxcvyZ\nFi1awAMP3DvgOTU2NnLffZtD4w7K7
08ECvwiE0SuIFtWluDddwd+bVnZMzjn6Ov7DOF6/AULTqGq\nqpXu7jJ27bqWXm9q+SH13BsbGxXsJxjV8YtMcBdddBGbN99H+oary0kkyunvvxmA8vJraW29GyB0\nY5cX3MMVOPlUDMn4ohu4REpYeJGTCy44m/POOy82iCu4Ty4K/CIiJWa4gb+gxdZFRGTiUeAXmaDa\n29tpaGiioaGJ9vb2sT4dmUCU6hGZQIIcfXf3q+zatYPeXm8AVwO1pUk5fpFJLnO6ZfBuzPouwSyZ\n9fWtPPDAvbS3t7Ns2Sf8ue+hvHw3ra0/UPCfhJTjF5nkMqdsaMarx7899Xx3tzcT5+rV6+jtTeLd\nxPUZenuTrF49nPVrZbJS4BcZZ4aWu38RbzGUlXjrw8Jzz71MeulE7wPCe0zEozt3RcYRL03zSXp7\n/xmArq5P0tp6N42NjbS0XEpX1yfo7Q3Wx30CmI63nGItVVVeunTu3Dn09GQed+7cOcX6FWQCUOAX\nGUdWr77RD/rePDu9vd5j6fz8FDJn0Dwfbx79q6it9RYEv/HG1f6Hh9eqvPxabrzx7qL9DjL+KdUj\nMgZypXOee+6FrLbBYxs33h76UGjGm6JhT2o7WAWrsbGR1ta7qa9vpb6+NfWNQSSgHr9IkUWrc8Kr\nVs2dezg9PStDrVcyd+4JQ34PTZwmA1HgFymyuAVVVq++0a+7T5JM7vPXt4Vkch+QpKGhidrahWzZ\nsio1LXN6cfTNmgdfhqTgwO+cWwrcDJQB3zSzL0eer8IrNj7cf7+bzOyuQt9XZDLZseMJ+vs3AlBe\n/mtqasoA2LVrKtu3XwzAli2rWLPmytB6t9f56Z2BF0sRiSoo8DvnyoCvA2cBvwd+6ZxrNbMnQ82u\nALab2Wr/Q+A3zrnvmllfIe8tMlFFF1RJJK6hv/9ThAd0q6pa/e1LCH8z6OpqzVgcZc0a789gzCA4\nvj4EZCCFDu6eBjxtZr8zs/3AD4CPRNq8BMzwt2cAryroSylrbGxkzZorqaxcR2XlOubNOwKvMmdw\n3d2vZg0KB2MGHR3L6OhYxrnnNmvuHhlQoameo4DnQ/svAKdH2twB/Kdz7kW8ouOPFfieIhNae3s7\nGzZ8LTW4u3fvtZSXX521+tXWrVvp6MhcI3fnznfp6/tXID0onGsRdvX6JZdCA38+E+x8HnjMzOqc\nc9VAh3NugZm9GW60du3a1HZdXR11dXUFnprI+BQN1L29UFNzZyq9E+TrvcHezDVy+/oeJhrgpXR0\ndnbS2dlZ8HEKDfy/B44O7R+N1+sP+zNgA4CZPeOc2wOcAGwNNwoHfpFSU1U1K8fC5vMJr5ELD2e1\nyLUIu0w+0U7xl770pWEdp9DAvxU43jl3LN6kIR8Hzou02Y03+Puwc242XtB/tsD3FZlwwlMqx6V2\noqIBvbz8WmA/vb2bM16XaxF2kVwKnpbZOXc26XLOb5nZjc65ywDM7Da/kudO4Bi8weQbzez7kWNo\nWmaZ1KI3bZWXX8vJJ7+XqqrZA1bhROfVBzTPvqRoPn6RcayhoYmOjmUE+fnw/PmghVNkeIYb+HXn\nrsgYG2gKB5HRoB6/SBEMlOrp7n7Vvzs3+9uAvgnIQNTjFxnHwgOw3nq5+9m+/RIAEomW2Nfom4CM\nFvX4RYosO9+/kkTi2/T3fxVIL5y+cePtGheQAWnNXZEJaz7z5h2RmsJhzZorBw3imqZBCqEev0iR\nZef7rwampJZbDHr8QMYyjOXl16YWVRmsSkhKg3L8IhNE9Iar7u4FGYO7+/bt5PzzP8fcuXPo738b\nCNbY3T8m5yuTj3r8ImMss/feDlxIepqGlXjLWTQSrfYJf2sIviUoz19a1OMXmaAyp2a4FS/oN4da\n3I4X+NM0TYMUQj1+kXFgw4YNbNp0J2+88SZ9fV8hnLv3Pgw+o169ZNGUDSITVGbaZifeEha3AAPP\n6aNyTlHgF5mg4ur6Kyt/zKJFC3IGdOX4BZTjFxn38u+hz2fRoj0DlmZq1S0phAK/SBEMNP2CFlKR\nYlOqR6QIhjItc23tQrq6tgG5vxko1SOgVI/IhNbY2EhjY2PeE7OpnFMKoR6/yChKL7f4Crt2/TZr\nWoZosNZUDDIU6vGLjDNxc/LU1NxJVdUs9dBlTCnwi4ySaOVNby9UVQ3ce9dArxSDAr/IOKLcvRSD\n5uMXGUHt7e00NDTR0NBEbe1CKipW4U27sNnvvV+a1S46j35jYyMPPHBv6ptBrnYiw6XBXZEREldi\nuWbNlVmlmfmWYqpkUwajKRtExli+FTkj3U5K15gtveicW+qc2+2ce8o5typHmzrn3Hbn3BPOuc5C\n31NERIavoMFd51wZ8HXgLOD3wC+dc61m9mSozSHAvwKNZvaCc66qkPcUGa/yrcgZ6XYiQ1VQqsc5\n9wHgi2a21N+/HsDM/inU5nLgcDP7wgDHUapHJoV8J2Ib6XZSmsYkx++c+yu8nvwl/v6FwOlmdmWo\nzVeBKcDJwHTgX8zs7shxFPhFRIZorO7czSdaTwEWAh8CpgKPOOd+YWZPhRutXbs2tV1XV0ddXV2B\npyYiMrl0dnbS2dlZ8HEK7fEvAdaGUj2rgX4z+3KozSqgwszW+vvfBNrM7IehNurxi4gM0VhV9WwF\njnfOHeucKwc+DrRG2vwHcKZzrsw5NxU4Hfh1ge8rIiLDVFCqx8z6nHNXAO1AGfAtM3vSOXeZ//xt\nZrbbOdcGPA70A3eYmQK/iMgY0Q1cIiIT1JjdwCUiIhOLAr/IBDLQ5G4i+VKqR2SC0KRtEqVJ2kQm\nOU3aJlHK8YuMQ0rNyHikwC8ySoLUTEfHMjo6lnHuuc2p4D+cD4SWlktzLuwiMhRK9YiMklypmZaW\nS4edq9ekbRI2VnP1iMgQRRdh37fPeyyfIN7Y2KhgLwVTqkdklCg1I+OVevwio6SxsZH77tscSs2k\n0zkDLbCidI6MNuX4RcZAruCuWn0ZCtXxi0wCqtWXoVAdv4iI5EU5fpFxRAusSzEo1SMyzmhwV/Kl\nHL/IOKeALiNNgV9kHFO1jowGBX6RcUzVOjIaVNUjIiJ5UVWPyCgK8vrd3a9QXn4tvb3e46rWkbGk\nVI/IKInm9cvLr+bkkxdQVTVLg7syIjQ7p8g4E52Fs7cXqqqU15expxy/iEiJKTjwO+eWOud2O+ee\ncs6tGqDd+51zfc655YW+p8hEoGmZZbwqKMfvnCsDfgOcBfwe+CVwnpk9GdOuA3gbuNPM7o08rxy/\nTEq6aUtG01jl+E8Dnjaz3/kn8QPgI8CTkXZXAj8E3l/g+4mMa3GBXsFexptCA/9RwPOh/ReA08MN\nnHNH4X0Y/AVe4FfXXialaBXPli3NujtXxqVCA38+Qfxm4HozM+ecA2K/lqxduza1XVdXR11dXYGn\nJlJccWvprl69TqkeGTGdnZ10dnYWfJxCc/xLgLVmttTfXw30m9mXQ22eJR3sq/Dy/JeYWWuojX
L8\nMuFlT8uwkkTi2/T3fxXQ/Dwy8sZkrh7nXBJvcPdDwIvAfxMzuBtqfydwv5n9KPK4Ar9MeNFUTyLR\nQn//RsLz81RWrmPRogXq/cuIGJO5esysD7gCaAd+DfybmT3pnLvMOXdZIccWmWiCxdXr61upr29l\nwYJTstr09BxKR8cyzj23mfb29jE4S5ERuHPXzH4K/DTy2G052l5c6PuJjAf5lGk2NdWze/eq1Gpa\nsBL4LtDIvn3emIB6/TIWNGWDyBDlqt4BIo+vYs2aK+nqauVXv9pBT08zoEAvY0+BX2SI4qp3gt5/\n9PGuLm9unvSHxXwge3ZO3eglxaTAL1IEQf4/HdzT1T2q/5diU+AXyUO4R15bu5AtW9K5+/Lyq+nu\nXpDazjXnfq67eHN9g1Dgl9GiwC8yiOweeTp33939Crt2TWH7dq9uobz8Wmpq7qCqanZGr15kPFHg\nFxlEXI/83nvvpKpqFs899zK9vf9MIXPut7RcypYtzalvEFqdS0abAr/IMDz22E7MNuHdt1iYgfL/\nIqNBSy+KDCKa6nHuasw+DdyEd+/ihf62pmWQ4hqTKRtGigK/jHfhwd1HHnmEvXtvJDwnTzJ5Nx/8\n4JkqxZSiUuAXKZKFC89k+/bfEPTyYSU1NSewbduWsTwtKUFabF1kFIV7/E1NZ7Nr1y56e28FIJnc\nByRpaGiitnYhXV3bAN2IJeOXevwig4jm+CsqgnLObX4552/9yp6dwB3ALal2yvfLaBqT2TlFSkFm\nOaf3AdDVtY0HHriXqqrZoXLOPXhBP90u+JYgMp4o8IuIlBjl+EUGMdANVi0tl9LV9Ul/moZ5wFWp\n15WXX0tLy91FP1+RwSjwiwxi8Bus9gO3Aq8Dff528LjI+KPBXZFhCKp8vHn2P4pX2tkEhNfc9Vbj\nCqZv0NTLMtJUzikywnIF6swqn2V4K2vVD3osTb0s44UCv0iMgQJ1dNI2z1rgDMI5/vBYgKZelvFE\ngV8kxlADdWXlH1i0aA+1tdfR1dUKaLI1Gb8U+KWkDSfvHlfl8/3vp4P8mjX5vUZTL8uYMbMx//FO\nQ6S42trarKJitsFdBndZRcVsa2trG/S54Pn6+uVWX7884/HB3m+orxEZiB87hxxz1eOXSSmfnvxA\n6ZxoCeeRRy7l/PM/B8CKFRezePHiIZ9TrqUXRYqt4MDvnFsK3AyUAd80sy9Hnr8AuA5wwJvAZ83s\n8ULfVySX6MBsV9cnOfnk9/rLIeZfRhkE6g0bNnDDDV8hmIPnhhuuIpl8l76+f/WP/wlOPnkBVVWz\nVKYpE8NwviZYOkVTBjwNHAtMAR4D3hdp8wHgYH97KfCLmOOM4pchKTX19cv9FI35P3cZLIlN55SX\nH5pK55SXHxqbgqmsrI453hx/u82gKmdKSGQ0McxUT6Fz9ZwGPG1mvzOz/cAPgI9EPlgeMbM/+ruP\nAnMKfE+RYfgD0Mq+fRdGJk4L7rq9FdjPPffcw6xZxzFr1nFs2LAhj+PejnfzliZmk4mj0FTPUcDz\nof0XgNMHaP9p4CcFvqfIgKIVNHA58B689XEfpLt7PuDl83t7bya9UPpfsXnzfYRTOuDl9INtT5Dq\n2cxIrLkrUmyFBv6851lwzn0Q+BTeXS5Z1q5dm9quq6ujrq6uwFOTUhUemN2y5efs23cg3t21ACt5\n+eXf09DQxK9+tQNvYrXAo6SnVfZs2rSOV199OrUNsGLFdSxevJiNG2/n2Wff4JlnMj8UamuvG71f\nTkpaZ2cnnZ2dBR+noLl6nHNLgLVmttTfXw30W/YA76nAj4ClZvZ0zHGskPMQyWXWrOPo6fl7wvPn\nwDXAV/39q4BLgPmhx9Ntp0//Am+88VzO4zc0NNHRMQ9vLn6AedTX79H8PFIUYzVXz1bgeOfcsXjf\neT8OnBc5sWPwgv6FcUFfZDTNnTuHnp7ooycQ7tVXVq5j0aI9PPHEwbz0Umbv/bDD8hmSmk96/d3N\nBB8Cmp9Hxq3hjAiHf4Czgd/gVfes9h+7DLjM3/4m8Cqw3f/575hjjPxwt4h5lTvJ5KxU1Y1zBxu0\nZFTo1NcvN7OgGqjJoNr/OcMqK6sHvOFqoBu94qqLgvcSGQmMUVUPZvZTMzvBzI4zsxv9x24zs9v8\n7b81s1lmVuP/nFboe4rE2bBhQ2xFTiLxLkHlTllZH+Xl38HrmW/2p064FPBSMeXlPwMOBSqA7fT0\n/D0dHctYtuwTLFxYR0NDE+3t7aljB+MJ9fWt1Ne3qkcvE4Lm45dJIXqTFVzF+vXX0dW1jY6OzDny\nq6u/wmuv/QmAD3/4TF588U0AamsX8g//8C/+GrrgDQh/19++kCCdk+8i6nGLtOuDQUbScHP8Cvwy\nYYUHTn/xi1/y5pvrCAd4L3e/IBL4V5JIfJv+/uzB3USihf7+jWQOBLf627kXWMn3HDW4KyNNC7HI\npJFPsGxvb2fZsk/Q23ui/8ibsceK1vQnEnf5QT88l34rcBP9/bfGHKGwOn3NzyPjkQK/jCv5VsKs\nXr2O3t4k8Bn/kasIL4ICVzFz5hw2brydNWuuTM2R3919Ctu353r3M0gkrqG/39srL7+Wk09+LwC7\ndl3rL6iuKZVlEhjOiPBI/6CqnpIXTFkcNy9OXCVMXLuKikOtsrLapk8/xhKJA/z5eZZYefkhqUqb\n5uZmgxmpKhxvuyVVkdPc3GyVldVWWVlt69evzzo/Taks4wmallkmmiCl0939Kjt3bqWvbz7wJ2Dn\noK+Nq88/8cST2Latk4ULz2T79rcJvg309q5k9ep1NDY28qMfPYSX0w9y9/WUlX2Hv/iLP6e29ko2\nbPha6tvGhg2rWLx4cSpdo5SNTBYK/DImMlM6O/Fu8QinbQDm50yrNDXVs317ZmqnqcmbKuG5514m\nPXGaZ+fO62hoaOKtt/5I9IYr537OAw/cS0NDU2R+/p2cf/7nWLRogQZmZVJR4JcxkbkIShPROXKS\nyev44AfPTAX9hoYmwCu57Ora5s+zU0+6534JXV3bWLMm/ttAX9976OhYhnM/w5u0LXAVc+fG3Z3b\nDmymp+cmOjp0161MLgr8MiwDVd7kei78eHf3KwMev6LiQAC2bt2akX7p6AjKL5fhfTOoAWaljtnQ\n0MQbb/SQOdAb1OM34g0pXQ3cAEAi0ceMGTNpaGiitnYhW7as8iuAbsX7IPI+WILpnBX4ZTJQHb8M\nWbTyprz86tQKVLW1CzMCdXDTEhB5zbX097/t5/VfxyubTN98FdTWO7cCs01k19ZfSvimKricsrID\nePfdoD7/CmAm3pjBJwmndmpq7qSqalZkbAHKy3fzhS+spKtrG4888gh7974FnOS/7tfU1Mxn27Yt\nI3MRRUbAcOv4x7yix0axqmf9+vWxFRpSmMw5aNoMKkNz4VRmVdsE1TCZj7eYc4eEqmumGsz2V7Y6\nwKDWYLnBKTGrXy33f8KPx7U7xa/YSVfxhOfSqak5I
2P1LKiympozzMysunp+1nPV1fPH8rKLZGGY\nVT1jHvRtlAL/+vXrs8r2FPzj5VuqmFlyGUx0VhsJuEuyAnwyeZglk4dFJkeLtguWMzwxptzy4Mj+\niX7b8PHmxByvOnUO06cfk/X7xZWEVlZWD/qcyHgx3MA/aXP8mzbdSdyiGmvWrCnqeYz3W/azFyaP\nXzg8s12QXwd4JnLEM4C/87d3AnfQ1xdO4fxf4BDgyZizCQZZryf891Zefg3Tpq1j//5e3nrL6O+/\nPnQ88Kp0XiO92Ar+9gmp55cs2ZM1xULcIHAw0DvQcyIT3nA+LUb6h1Ho8Y+HHttAU/aOpXAPv6Ym\n3GPPvXB43BTDlZXVlkgcnJUSgQP9Hnh22ifd058WSfVU+e8ft1D6zNBrW0Kpnha/V+9Np+zcNAtu\n2vJSR+kbs+Ku+0CLree7ELvIWEKpnkyjnerJJz1SjPnYw+MYzc3Ng55T9MMokZgVCqbxH5bZHxDp\n5yoqDjUIB9wZ/v5d5uXso0G8NrWdTE7zU0CV5s2Dn/3h46V5mvzzOzryd1plMD91vJqaM1K///r1\n64eUvoprp7t1ZbxT4I8xWoO7cT35uEAzGoE/HIwGmn4gmZyVCoRtbW2pa+EF2pMsvdjIMaFjxPWo\nT/GPd1CDrphqAAANTklEQVTMezVZemD2FP/1B1k69/4ey/42cIYFeffc536w1dTUWn39cnNuaswx\n2kIfJLMH7NWLTGbDDfwq5xwGb53VzGl601P65i5hHGg+9uhYAJDaD25aipYfwhN4i53t8feDtV/v\nxSt7vBX4DMnk5+jrKyOzXLIe+HBo+4fABuArkXb7genAAcDHBnivdcAC4Nekc/THAR+NvOZhvDt0\ns9e3DW7aCn5fgJ/9rJP+/rhyznszXjMex09ERtukKOfM9dU6+vhYfQUfaCIxr7ebnSI566yzUlUt\nzc3NsefuLQ94kHl58TmWSEz195f4Pemp/ntEe8kHWbiU0ts+ItRbX+6fz2Ex51sd2p7jb8fl16tC\nPfnMap10fr3FP8YS8/L7QeqnKnJ+QUXOEoNDst6rsrI669tUOr8fvdbZ6TulZqTUMNFTPbkG06KP\nJ5OzLJk8OGM/nNIYamniYPngoF1NzRn++y6xaMlhIjHTT2EE6ZMmv11TJFBPtbKyman9srKZNm3a\nEX46I5r2mGqZwS84/kClk5lB0ct/L7f4QdZw4J8ZeW3c8WZa7g+gcNrnoNDx7jKvJv9w/2eKBWMB\nzk3JOkYwRhH9gPHWyQ3uEzjYEomDUx+k4b+n8TiQLjKaJnzgjxs8rKmpjX082rsOglN5+SEZC2tH\nqzTCgT78YRKdmjezsuMQ//izs4I4VFoyeZhVV1f7++GKkvdYdk87LrAeniMwR3/HU2JeH3fTUm1M\n0D5ggEA9w7ye+XLzPiiig6fhfPoplmsQOP3tInyOmQO14Q9p7+81czwh/kavuzJe4/195Fd1pIXN\nZbIbbuAfN3X8zz33QtZjO3c+idm7Ma33RvaPBJrp7fVy2kE+uLd3J3/915dy3HHz2LHjl/T3nwpA\nR8d/AjeTzhvvBH4M7GHfvjP5y7/8G2bMmM4BB7wbWexjJXC4vz0VuIm+PnjmmcuBAyPtyoH3DHLe\nAFUxj8W13Yc37cDH8HL34OXOo7XrU/AmPXPAH/Dy4YcCPQTz03jH6gQexblezPrwavNvJT1l8Q68\n6xPOm0/Dy6035TjnqNsJz5LZ1wdVVa2pmTCjs2TCnqwVsyoqVnHjjd64SENDE7296b+3ffvQ/Dki\nwzBuAv/cuYfT05MZxPr6qlLb4cfhHbxAAemJuprw5nwJeLMrvvnmTf6KS9vxbi6aD6yItLsDb06W\nF4HH6ev7LD098/Em8wp/QIAXzCBz2t9b/WMHM0U2A3fjTR4Wfq/fxfwuJ4S2c/2O4XbhD6KrCU8k\n5m0/jDeIegfpQdqfAWcD5u/Po7LyxyxatIDu7lfYvn2Jf4w/kA7G7Xhz4QQDycHNUpv942dOiex9\nYGymvHw3EKxWlXvZwrgA39LiBfj77tscGujOb0bMXMcTkRjD+Zow0j+kcvxBWiWoB2+zzIHKYDvI\ntc+NSWEc6D8XvaU/nI4I595PtNwlh7ly3tHH445xYuicjg6lSKK/S67t8LUIbkbKng4hczqDIDUT\nlx7LJ0WSmZopLz8kVVbpzV0TPsem1DQI0TGS8LhIOKUWzbsPdTB2sDy+Bnel1DBWOX5gKbAbeApY\nlaPNLf7zO4CamOfNLNdcMAPd0BOXa467GzQuDx0EsYEqXuIGMQ/xf8KPHxxzjHCuPcjFT7PsKpeW\nrPdJJmdZIpEeM3CuwqZPP9pgVsz7BBU04Q+67OtSVnZozoqpcDANB/tou+HcyTrSwVjBXSRtTAI/\nUAY8DRyLl1x+DHhfpM05wE/87dOBX8Qcx8y89VCTycOsrOxQg4QfJCsNElZWdqglk4eZcxWhoBZX\nfhg/8BsewM0cTI3rHQeBOvwhE/TEK/3Hg2kJ5uQI/Mst84NkuaW/oQRlmul1YWGqTZ9+dGjwOd3j\nD9aMjbsbOf4DMvsbyEAzSw6nEkpBV2TsjVXg/wDQFtq/Hrg+0uZW4OOh/d3A7EibyF2oJ8UEuJNC\n20FZ4Rkx7YJb/4Oge7gfnMMBvMnS3wzievVBGeTcHB8KLRau4kkmD8pZJRTedi5ak97in1/mguAD\nVaiE70b20i+Zx0smD7Np046wRGJa6PwOVqAWmYSGG/gLHdw9Cng+tP+C36sfrM0cIGMJpu9976ek\nZ9NsITqzpvdYsB+soPQ2mQtnXwJ8m8xB0V68qp8OgsFO567G7EOh180HWkgkoL//T/57AbRRVnYN\n7/qFRclkC/Pnn0hV1R5qaz+furu0pWUtkL7T9sgjz+X++38M/JgPf/hcXnxxD7CH7u5T/YHmwHwq\nKw9i0aIjaWlZm9cg5po1a1IzjKZnzPSeq6j4Lvfd9x0aGxsjdwLnd2wRKQ2FBn7Ls130luKs1737\n7l7gPrwSxf0DHmz69BksWbKYhx7a4k9fEC4JLCdc5TJ9+r+zZMlJ1NZeSFeX93ht7cqYVaK+R2Nj\nIxs2bGDTpnUArFjxeRYvXhwKoN/LCKDRGZ4HC67ZgXoV3/9+dtVKvhUqA1XANDY2KtiLTDKdnZ10\ndnYWfqDhfE0IfoAlZKZ6VhMZ4MVL9XwitF9wqie4TT8u5+0Nig4+ADnW0z4ony4ihWKYqZ6CJmlz\nziWB3wAfwiva/m/gPDN7MtTmHOAKMzvHObcEuNnMlkSOY2bGRRdd5Kd84NBDy3jppV4AqqsP4bXX\nvLYrVlycsZiK10O/M/VcZg9dE3eJyOQ13EnaCp6d0zl3Nt5dTmXAt8zsRufcZQBmdpvf5ut4ZZ9v\nAReb2bbIMazQ8xARKTVjFvhHggK/iMjQDTfwJ0bjZEREZPxS4BcRKTEK/CIiJUaBX0SkxCjwi4iU\nGAV+EZES
o8AvIlJiFPhFREqMAr+ISIlR4BcRKTEK/CIiJUaBX0SkxCjwi4iUGAV+EZESo8AvIlJi\nFPhFREqMAr+ISIlR4BcRKTEK/CIiJUaBX0SkxCjwi4iUGAV+EZESM+zA75yrdM51OOd+65x7wDl3\nSEybo51zDznndjnnnnDOXVXY6YqISKEK6fFfD3SY2XuBn/n7UfuBa8zsZGAJ8Dnn3PsKeM9Jr7Oz\nc6xPYdzQtUjTtUjTtShcIYF/GbDZ394MfDTawMxeNrPH/O29wJPAkQW856Snf9RpuhZpuhZpuhaF\nKyTwzzazV/ztV4DZAzV2zh0L1ACPFvCeIiJSoORATzrnOoDDY55aE94xM3PO2QDHmQb8EPg7v+cv\nIiJjxJnljNcDv9C53UCdmb3snDsCeMjMToxpNwX4P8BPzezmHMca3kmIiJQ4M3NDfc2APf5BtALN\nwJf9P38cbeCcc8C3gF/nCvowvBMXEZHhKaTHXwn8O3AM8DvgY2b2unPuSOAOM/tfzrkzgf8CHgeC\nN1ptZm0Fn7mIiAzLsAO/iIhMTEW9c9c5t9Q5t9s595RzblWONrf4z+9wztUU8/yKabBr4Zy7wL8G\njzvnHnbOnToW51kM+fy78Nu93znX55xbXszzK6Y8/4/UOee2+zdFdhb5FIsmj/8jVc65NufcY/61\nuGgMTnPUOee+7Zx7xTm3c4A2Q4ubZlaUH6AMeBo4FpgCPAa8L9LmHOAn/vbpwC+KdX7F/MnzWnwA\nONjfXlrK1yLU7j/xCgWaxvq8x/DfxSHALmCOv1811uc9htdiLXBjcB2AV4HkWJ/7KFyLP8crhd+Z\n4/khx81i9vhPA542s9+Z2X7gB8BHIm1SN4WZ2aPAIc65Ae8PmKAGvRZm9oiZ/dHffRSYU+RzLJZ8\n/l0AXIlXEvyHYp5ckeVzLc4H7jWzFwDMrLvI51gs+VyLl4AZ/vYM4FUz6yviORaFmf0ceG2AJkOO\nm8UM/EcBz4f2X/AfG6zNZAx4+VyLsE8DPxnVMxo7g14L59xReP/pv+E/NFkHpvL5d3E8UOnPgbXV\nOffJop1dceVzLe4ATnbOvQjsAP6uSOc23gw5bhZSzjlU+f5njZZ2Tsb/5Hn/Ts65DwKfAs4YvdMZ\nU/lci5uB683M/BLhyVr+m8+1mAIsBD4ETAUecc79wsyeGtUzK758rsXngcfMrM45Vw10OOcWmNmb\no3xu49GQ4mYxA//vgaND+0fjfTIN1GaO/9hkk8+1wB/QvQNYamYDfdWbyPK5FouAH3gxnyrgbOfc\nfjNrLc4pFk0+1+J5oNvM9gH7nHP/BSwAJlvgz+da/BmwAcDMnnHO7QFOALYW5QzHjyHHzWKmerYC\nxzvnjnXOlQMfx7sJLKwV+BsA59wS4HVLzwc0mQx6LZxzxwA/Ai40s6fH4ByLZdBrYWbvMbN5ZjYP\nL8//2UkY9CG//yP/AZzpnCtzzk3FG8z7dZHPsxjyuRa7gbMA/Jz2CcCzRT3L8WHIcbNoPX4z63PO\nXQG0443Yf8vMnnTOXeY/f5uZ/cQ5d45z7mngLeDiYp1fMeVzLYAvADOBb/g93f1mdtpYnfNoyfNa\nlIQ8/4/sds614d0U2Y93s+SkC/x5/rv4R+BO59wOvE7sdWbWM2YnPUqcc/cAtUCVc+554It4Kb9h\nx03dwCUiUmK09KKISIlR4BcRKTEK/CIiJUaBX0SkxCjwi4iUGAV+EZESo8AvIlJiFPhFRErM/wct\nu82Vedy9LgAAAABJRU5ErkJggg==\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXwAAAD8CAYAAAB0IB+mAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAE2hJREFUeJzt3X/o3Vd9x/Hnu6lRhqkbJmalSfx2mA5DGVS/NB3CrGhG\nWiH5p9NUilaKAUcdTCdkZFSpFDpluIndNNtKV0Fr9Q8NNlKKVhQxId/SWWxK4LvYma+Vtna1KxSN\nme/9cW+S29v743Pv/dyf5/mA0vu599zPPTkkr3vuOedzPpGZSJIW30XTroAkaTIMfEkqhIEvSYUw\n8CWpEAa+JBXCwJekQhj4klQIA1+SCmHgS1IhLp7WB2/cuDGXlpam9fGSNJceeeSRX2bmpmHeO7XA\nX1paYmVlZVofL0lzKSL+e9j3OqQjSYUw8CWpEAa+JBXCwJekQhj4klQIA1+SCmHgS1Ih+gZ+RNwd\nEc9ExE+6vB4R8bmIWI2IxyLiLfVXU5I0qioXXt0DfB64t8vr1wHbm//tBP6l+X9JGsnOOx7i6RfP\nTLsaM2X9H77prcO+t2/gZ+b3I2KpR5G9wL3ZuBv60Yj4/Yi4NDN/MWylJC2e9vDevGE9xw7uMtQn\nqI6tFS4DTrccrzWfM/ClwgwS3k+/eIalAw+MuUZqVUfgR4fnsmPBiP3AfoBt27bV8NGSRtEroM/1\nwPu9x576/Khjlc4asLXleAvwVKeCmXkoM5czc3nTpqE2e5NUk34B/fSLZ9h5x0N933Oup27Yz746\nAv8w8P7map1rgBccv5dmX5WA7hTuml99h3Qi4ivAtcDGiFgDPgG8CiAzvwAcAa4HVoGXgA+Oq7KS\nJm/pwANdh3c0X6KxuGbylpeX0/3wpfFYpHF2v2xeLiIeyczlYd47tRugSBqPXuPs09Qa3N2+kDRe\nBr60AGap514lvA336TDwpTlXR9g/eee7zz8e5JfA5g3r7anPEQNfmnN19+zbQ7wXw32+uFumVLjN\nG9a/7PjYwV2veK7K+zT77OFLc6qOoZxuQzDtzznJuhgMfGkOVQn7fkMzreP2/Rjui8EhHWkOVQl7\nQ1rt7OFLC6a1596tl+/4e5ns4UsLrNMErL3/ctnDl+bQID13w13n2MOX5pA9dw3DHr40pwx3Dcoe\nviQVwsCXpEIY+JJUCMfwpTniFgcahT18aU50u7FJ+43GpW4MfGlOdNtOYVZufKLZZ+BLUiEcw5dm\ngGPzmgR7+NKUVR2b77bhmRuhqSoDX5qyqmPzbqegUTmkI80Rw12jsIcvSYWwhy9NSLeJWW9Sokmx\nhy9NQK+JWcfmNSn28KUJ6Dcxa7hrEuzhS1IhDHxJKkSlwI+I3RFxMiJWI+JAh9e3RcTDEfFoRDwW\nEdfXX1VpfnnRlGZB38CPiHXAXcB1wA7gxojY0Vbs74D7M/MqYB/wz3VXVJpnTsxqFlSZtL0aWM3M\nUwARcR+wFzjRUiaBS5qPXwc8VWclpUVguGvaqgzpXAacbjleaz7X6pPATRGxBhwBPtLpRBGxPyJW\nImLl2WefHaK6kqRhVQn86PBcth3fCNyTmVuA64EvRcQrzp2ZhzJzOTOXN23aNHhtJUlDqxL4a8DW\nluMtvHLI5hbgfoDM/BHwGmBjHRWUJNWjyhj+cWB7RFwO/JzGpOz72sr8DHgncE9EvJlG4DtmI43I\nffJVp749/Mw8C9wKPAg8QWM1zuMRcXtE7GkW+xjwoYj4MfAV4ObMbB/2kTQA72GrulXaWiEzj9CY\njG197raWxyeAt9VbNals3sNWdfNKW0kqhIEvSYUw8KUZ5XYMqpuBL80ot2NQ3dwPX5phhrvqZA9f\nkgph4EtSIQx8SSqEY/jSjHE7BY2LPXxphridgsbJwJdmiNspaJwMfEkqhIEvSYUw8KUZ4nYKGicD\nX5ohbqegcXJZpjRjDHeNiz18SSqEgS9JhXBIR5oCr6bVNNjDlybMq2k1LQa+NGFeTatpMfAlqRAG\nviQVwsCXJsyraTUtBr40YV5Nq2lxWaY0BYa7psEeviQVwsCXpEIY+JJUiEqBHxG7I+JkRKxGxIEu\nZd4TESci4vGI+HK91ZQkjarvpG1ErAPuAnYBa8DxiDicmSdaymwH/hZ4W2Y+HxFvGFeFJUnDqdLD\nvxpYzcxTmXkGuA/Y21bmQ8Bdmfk8QGY+U281JUmjqhL4lwGnW47Xms+1ugK4IiJ+GBFHI2J3XRWU\nJNWjyjr86PBcdjjPduBaYAvwg4i4MjN/9bITRewH9gNs27Zt4MpKkoZXpYe/BmxtOd4CPNWhzDcz\n87eZ+VPgJI0vgJfJzEOZuZyZy5s2bRq2zpKkIVQJ/OPA9oi4PCLWA/uAw21lvgG8AyAiNtIY4jlV\nZ0UlSaPpG/iZeRa4FXgQeAK4PzMfj4jbI2JPs9iDwHMRcQJ4GPh4Zj43rkpLkgYXme3D8ZOxvLyc\nKysrU/lsSZpXEfFIZi4P816vtJWkQhj4klQIA1+SCmHgS1IhDHxJKoSBL0mFMPAlqRAGviQVwsCX\npEIY+JJUCANfkgph4EtSIQx8SSqEgS9JhTDwJakQBr4kFcLAl6RCGPiSVAgDX5IKYeBLUiEMfEkq\nhIEvSYUw8CWpEAa+JBXCwJekQhj4klQIA1+SCnHxtCsgzaOddzzE0y+eOX+8ecN6jh3cNcUaSf3Z\nw5cG1B72AE+/eIaddzw0pRpJ1djDlwbUHva9nveXgGZJpR5+ROyOiJMRsRoRB3qUuyEiMiKW66ui\nNJ/8JaBZ0zfwI2IdcBdwHbADuDEidnQotwH4K+BY3ZWU5tEgvwSkSajSw78aWM3MU5l5BrgP2Nuh\n3KeATwO/rrF+0szZvGH9QM9Ls6JK4F8GnG45Xms+d15EXAVszcxv9TpRROyPiJWIWHn22WcHrqw0\nC44d3PWKcHdsXvOgyqRtdHguz78YcRHwWeDmfifKzEPAIYDl5eXsU1yaWVXCffOG9R2Hb/wloGmp\n0sNfA7a2HG8Bnmo53gBcCXwvIp4ErgEOO3Gr0vlLQLOmSg//OLA9Ii4Hfg7sA9537sXMfAHYeO44\nIr4H/E1mrtRbVWn+GO6aJX17+Jl5FrgVeBB4Arg/Mx+PiNsjYs+4KyhJqkelC68y8whwpO2527qU\nvXb0aknzw4urNC/cWkEagRdXaZ4Y+NIIvLhK88TAl6RCGPiSVAgDXxqB2yxonhj40gi8uErzxP3w\npREZ7poX9vAlqRAGviQVwsCXpEIY+JJUCANfkgph4EtSIQx8SSqEgS9JhfDCK6kL97nXorGHL3Xg\nPvdaRAa+1IH73GsRGfiSVAgDX5IKYeBLHbjPvRaRgS914D73WkQuy5S6MNy1aOzhS1IhDHxJKoRD\nOlIFXnWrRWDgS330uuq2PfT9YtAsc0hH6qPqVbdux6BZZ+BLNXE7Bs26SoEfEbsj4mRErEbEgQ6v\nfzQiTkTEYxHxnYh4Y/1VlSSNom/gR
8Q64C7gOmAHcGNE7Ggr9iiwnJl/Anwd+HTdFZWmxatutSiq\n9PCvBlYz81RmngHuA/a2FsjMhzPzpebhUWBLvdWUpqfqVbd+MWjWVVmlcxlwuuV4DdjZo/wtwLdH\nqZQ0a6qstDl2cJerdDTTqgR+dHguOxaMuAlYBt7e5fX9wH6Abdu2VayiND8Md82yKoG/BmxtOd4C\nPNVeKCLeBRwE3p6Zv+l0osw8BBwCWF5e7vilIU2SPXKVpMoY/nFge0RcHhHrgX3A4dYCEXEV8EVg\nT2Y+U381pfq5bl6l6Rv4mXkWuBV4EHgCuD8zH4+I2yNiT7PYZ4DXAl+LiP+MiMNdTifNDNfNqzSV\ntlbIzCPAkbbnbmt5/K6a6yVN1dKBBxze0cLxSlupC4d3tGjcPE0Lqcpk7OYN6/sO3zi8o0ViD18L\np99k7M47HmLpwAOGuYpj4Gvh9JqM7fRlIJXCwFdRBg17t0XQIjHwpaYq++VI88xJW03NuK5yrTIZ\n24nhrkVnD19TMc6rXDvtbtmPQzcqgYGvqeg1sbp04IGRg//YwV08eee7K5V16EalcEhHM6nbTcJb\njbLW3pBXiQx8zaxe4/DdhoSWDjxw/vhcqLsjptRg4GtihlkD321PmyrnqfIrQSqJga+xquNCp1GC\n24uspAuctNXYVA37KitkDG5pdPbw9TJ1jndXDelO4+z9DLvWXiqZPXydN807QA2yjPJc+Sq/DFxf\nL11gD1/n1XUHqEF66522M+i2jLJd+y8PV+NIvRn4qtWgYd8eyKMsozTcpd4MfNWqX9hXCe9RL7aS\n1JmBPwcmFXKDDKcMY5Ax+k56zTEY+lJ/Bn5NxhXKg4TcqHXotlqm/QrWYc8/rH7DRK7Wkaox8Bk9\nKIfteVb53KoTqXV9MZz7f5Wx+E7nr/tXgneokupT/LLMOpYiDrPzY6+edNXdIlvL1fHF0Ot93bSX\n67RcchJr+SX1V3wPv66liP0+41xPuGqPteqXTpVynYZjOp3nXLlBe+OdfjGMOl4/CNfaS9UUE/h1\nj7EPOtQwzA20h+1lj2rQ801zItVVOlJ1RQT+IBORg55nECUNT9T1Z3U/e6k+MxX441rpMmz4DPqF\nsGj67Vczif1s3M9eqs/MBH5da6w7hcMkLdKmXoOsMJpWPSRVNzOrdOqYPO32pTFJw95Au+p76vwC\n6/W5vT6nStg7kSrNnpnp4Q9iVtdmnwu5cz3SKkNCnYYnuv352sv2On+V4Zhua+77DZnUsX2CpMmr\nFPgRsRv4J2Ad8G+ZeWfb668G7gXeCjwHvDczn+x33qrBPQ9j6YOGXK9li1XHreua0KwznCe5HFPS\nYPoGfkSsA+4CdgFrwPGIOJyZJ1qK3QI8n5lvioh9wN8D7+113id+8b9smlIv/ck73911rH/Qm3D0\nCstRrjqtujukE5qSqqrSw78aWM3MUwARcR+wF2gN/L3AJ5uPvw58PiIiM7PbSc/+rutLE9EtFPv9\n6hgkUCcRyNMI93FvsiZpPKoE/mXA6ZbjNWBntzKZeTYiXgBeD/yytVBE7Af2A6y7ZNOQVR5Nv1Cq\nO0AXsbftLwtpPlUJ/OjwXHv3vEoZMvMQcAjg1ZduH1sX/1z4GErjYztK86dK4K8BW1uOtwBPdSmz\nFhEXA68D/qfnB1/U6TtiuLH09ve37/woSaoW+MeB7RFxOfBzYB/wvrYyh4EPAD8CbgC+22v8HuDN\nl17Curax4F5LBVvZU5ekwUWfXG4Uirge+EcayzLvzsw7IuJ2YCUzD0fEa4AvAVfR6NnvOzfJ283y\n8nKurKyM/AeQpJJExCOZuTzMeyutw8/MI8CRtudua3n8a+AvhqmAJGkyZmZrBUnSeBn4klQIA1+S\nCmHgS1IhDHxJKoSBL0mFMPAlqRCVLrwaywdHvAicnMqHz56NtG00VzDb4gLb4gLb4oI/zswNw7xx\nmne8Ojns1WKLJiJWbIsG2+IC2+IC2+KCiBh6iwKHdCSpEAa+JBVimoF/aIqfPWtsiwtsiwtsiwts\niwuGboupTdpKkibLIR1JKsTYAz8idkfEyYhYjYgDHV5/dUR8tfn6sYhYGnedpqVCW3w0Ik5ExGMR\n8Z2IeOM06jkJ/dqipdwNEZERsbArNKq0RUS8p/l34/GI+PKk6zgpFf6NbIuIhyPi0ea/k+unUc9x\ni4i7I+KZiPhJl9cjIj7XbKfHIuItlU6cmWP7j8YNU/4L+CNgPfBjYEdbmb8EvtB8vA/46jjrNK3/\nKrbFO4Dfaz7+cMlt0Sy3Afg+cBRYnna9p/j3YjvwKPAHzeM3TLveU2yLQ8CHm493AE9Ou95jaos/\nA94C/KTL69cD36ZxP/FrgGNVzjvuHv7VwGpmnsrMM8B9wN62MnuB/2g+/jrwzojofMPb+da3LTLz\n4cx8qXl4lMb9gxdRlb8XAJ8CPg38epKVm7AqbfEh4K7MfB4gM5+ZcB0npUpbJHBJ8/HreOX9tRdC\nZn6f3vcF3wvcmw1Hgd+PiEv7nXfcgX8ZcLrleK35XMcymXkWeAF4/ZjrNQ1V2qLVLTS+wRdR37aI\niKuArZn5rUlWbAqq/L24ArgiIn4YEUcjYvfEajdZVdrik8BNEbFG4y58H5lM1WbOoHkCjP9K2049\n9fZlQVXKLILKf86IuAlYBt4+1hpNT8+2iIiLgM8CN0+qQlNU5e/FxTSGda6l8avvBxFxZWb+asx1\nm7QqbXEjcE9m/kNE/CnwpWZb/G781ZspQ+XmuHv4a8DWluMtvPIn2PkyEXExjZ9pvX7KzKsqbUFE\nvAs4COzJzN9MqG6T1q8tNgBXAt+LiCdpjFEeXtCJ26r/Rr6Zmb/NzJ/S2INq+4TqN0lV2uIW4H6A\nzPwR8Boa++yUplKetBt34B8HtkfE5RGxnsak7OG2MoeBDzQf3wB8N5uzEgumb1s0hzG+SCPsF3Wc\nFvq0RWa+kJkbM3MpM5dozGfsycyh9xCZYVX+jXyDxoQ+EbGRxhDPqYnWcjKqtMXPgHcCRMSbaQT+\nsxOt5Ww4DLy/uVrnGuCFzPxFvzeNdUgnM89GxK3AgzRm4O/OzMcj4nZgJTMPA/9O42fZKo2e/b5x\n1mlaKrbFZ4DXAl9rzlv/LDP3TK3SY1KxLYpQsS0eBP48Ik4A/wd8PDOfm16tx6NiW3wM+NeI+Gsa\nQxg3L2IHMSK+QmMIb2NzvuITwKsAMvMLNOYvrgdWgZeAD1Y67wK2lSSpA6+0laRCGPiSVAgDX5IK\nYeBLUiEMfEkqhIEvSYUw8CWpEAa+JBXi/wGQK172GF9HZwAAAABJRU5ErkJggg==\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x10a8b6f60>"
+ "<matplotlib.figure.Figure at 0x11cbe6630>"
]
},
"metadata": {},
@@ -594,7 +562,8 @@
}
],
"source": [
- "param_run = BatchRunner(ForestFire, param_set, iterations=5, model_reporters=model_reporter)\n",
+ "param_run = BatchRunner(ForestFire, variable_params, fixed_params, \n",
+ " iterations=5, model_reporters=model_reporter)\n",
"param_run.run_all()\n",
"df = param_run.get_model_vars_dataframe()\n",
"plt.scatter(df.density, df.BurnedOut)\n",
@@ -605,9 +574,9 @@
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
- "display_name": "Python 3",
+ "display_name": "Python [mesa_dev]",
"language": "python",
- "name": "python3"
+ "name": "Python [mesa_dev]"
},
"language_info": {
"codemirror_mode": {
@@ -619,7 +588,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.4.2"
+ "version": "3.5.3"
},
"widgets": {
"state": {},
@@ -627,5 +596,5 @@
}
},
"nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
}
diff --git a/mesa/batchrunner.py b/mesa/batchrunner.py
index 897255c6..e5f23652 100644
--- a/mesa/batchrunner.py
+++ b/mesa/batchrunner.py
@@ -6,11 +6,33 @@ Batchrunner
A single class to manage a batch run or parameter sweep of a given model.
"""
-from itertools import product
+import collections
+import copy
+from itertools import product, count
import pandas as pd
from tqdm import tqdm
+def combinations(*items):
+ """
+ A small fix to handle dictionary type parameters in cartesian product.
+ """
+ prepared = [(item,) if isinstance(item, collections.Mapping) else item
+ for item in items]
+ yield from (param for param in product(*prepared))
+
+
+class VariableParameterError(TypeError):
+ MESSAGE = ('variable_parameters must map a name to a sequence of values. '
+ 'These parameters were given with non-sequence values: {}')
+
+ def __init__(self, bad_names):
+ self.bad_names = bad_names
+
+ def __str__(self):
+ return self.MESSAGE.format(self.bad_names)
+
+
class BatchRunner:
""" This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model and
@@ -23,19 +45,26 @@ class BatchRunner:
entire DataCollector object.
"""
- def __init__(self, model_cls, parameter_values, iterations=1,
- max_steps=1000, model_reporters=None, agent_reporters=None,
- display_progress=True):
+ def __init__(self, model_cls, variable_parameters=None,
+ fixed_parameters=None, iterations=1, max_steps=1000,
+ model_reporters=None, agent_reporters=None, display_progress=True):
""" Create a new BatchRunner for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
- parameter_values: Dictionary of parameters to their values or
- ranges of values. For example:
+ variable_parameters: Dictionary of parameters to lists of values.
+ The model will be run with every combination of these paramters.
+ For example, given variable_parameters of
{"param_1": range(5),
- "param_2": [1, 5, 10],
- "const_param": 100}
+ "param_2": [1, 5, 10]}
+ models will be run with {param_1=1, param_2=1},
+ {param_1=2, param_2=1}, ..., {param_1=4, param_2=10}.
+ fixed_parameters: Dictionary of parameters that stay same through
+ all batch runs. For example, given fixed_parameters of
+ {"constant_parameter": 3},
+ every instantiated model will be passed constant_parameter=3
+ as a kwarg.
iterations: The total number of times to run the model for each
combination of parameters.
max_steps: The upper limit of steps above which each run will be halted
@@ -51,8 +80,8 @@ class BatchRunner:
"""
self.model_cls = model_cls
- self.parameter_values = {param: self.make_iterable(vals)
- for param, vals in parameter_values.items()}
+ self.variable_parameters = self._process_parameters(variable_parameters)
+ self.fixed_parameters = fixed_parameters or {}
self.iterations = iterations
self.max_steps = max_steps
@@ -67,36 +96,42 @@ class BatchRunner:
self.display_progress = display_progress
+ def _process_parameters(self, params):
+ params = copy.deepcopy(params)
+ bad_names = []
+ for name, values in params.items():
+ if (isinstance(values, str) or
+ not hasattr(values, "__iter__")):
+ bad_names.append(name)
+ if bad_names:
+ raise VariableParameterError(bad_names)
+ return params
+
def run_all(self):
""" Run the model at all parameter combinations and store results. """
- params = self.parameter_values.keys()
- param_ranges = self.parameter_values.values()
- run_count = 0
-
- if self.display_progress:
- pbar = tqdm(total=len(list(product(*param_ranges))) * self.iterations)
-
- for param_values in list(product(*param_ranges)):
- kwargs = dict(zip(params, param_values))
- for _ in range(self.iterations):
+ param_names, param_ranges = zip(*self.variable_parameters.items())
+ run_count = count()
+ total_iterations = self.iterations
+ for param_range in param_ranges:
+ total_iterations *= len(param_range)
+ with tqdm(total_iterations, disable=not self.display_progress) as pbar:
+ for param_values in product(*param_ranges):
+ kwargs = dict(zip(param_names, param_values))
+ kwargs.update(self.fixed_parameters)
model = self.model_cls(**kwargs)
- self.run_model(model)
- # Collect and store results:
- if self.model_reporters:
- key = tuple(list(param_values) + [run_count])
- self.model_vars[key] = self.collect_model_vars(model)
- if self.agent_reporters:
- agent_vars = self.collect_agent_vars(model)
- for agent_id, reports in agent_vars.items():
- key = tuple(list(param_values) + [run_count, agent_id])
- self.agent_vars[key] = reports
- if self.display_progress:
- pbar.update()
-
- run_count += 1
- if self.display_progress:
- pbar.close()
+ for _ in range(self.iterations):
+ self.run_model(model)
+ # Collect and store results:
+ model_key = param_values + (next(run_count),)
+ if self.model_reporters:
+ self.model_vars[model_key] = self.collect_model_vars(model)
+ if self.agent_reporters:
+ agent_vars = self.collect_agent_vars(model)
+ for agent_id, reports in agent_vars.items():
+ agent_key = model_key + (agent_id,)
+ self.agent_vars[agent_key] = reports
+ pbar.update()
def run_model(self, model):
""" Run a model object to completion, or until reaching max steps.
@@ -126,38 +161,36 @@ class BatchRunner:
return agent_vars
def get_model_vars_dataframe(self):
- """ Generate a pandas DataFrame from the model-level variables collected.
+ """ Generate a pandas DataFrame from the model-level variables
+ collected.
"""
- index_col_names = list(self.parameter_values.keys())
- index_col_names.append("Run")
- records = []
- for key, val in self.model_vars.items():
- record = dict(zip(index_col_names, key))
- for k, v in val.items():
- record[k] = v
- records.append(record)
- return pd.DataFrame(records)
+ return self._prepare_report_table(self.model_vars)
def get_agent_vars_dataframe(self):
""" Generate a pandas DataFrame from the agent-level variables
collected.
"""
- index_col_names = list(self.parameter_values.keys())
- index_col_names += ["Run", "AgentID"]
+ return self._prepare_report_table(self.agent_vars,
+ extra_cols=['AgentId'])
+
+ def _prepare_report_table(self, vars_dict, extra_cols=None):
+ """
+ Creates a dataframe from collected records and sorts it using 'Run'
+ column as a key.
+ """
+ extra_cols = ['Run'] + (extra_cols or [])
+ index_cols = list(self.variable_parameters.keys()) + extra_cols
+
records = []
- for key, val in self.agent_vars.items():
- record = dict(zip(index_col_names, key))
- for k, v in val.items():
- record[k] = v
+ for param_key, values in vars_dict.items():
+ record = dict(zip(index_cols, param_key))
+ record.update(values)
records.append(record)
- return pd.DataFrame(records)
-
- @staticmethod
- def make_iterable(val):
- """ Helper method to ensure a value is a non-string iterable. """
- if hasattr(val, "__iter__") and not isinstance(val, str):
- return val
- else:
- return [val]
+
+ df = pd.DataFrame(records)
+ rest_cols = set(df.columns) - set(index_cols)
+ ordered = df[index_cols + list(sorted(rest_cols))]
+ ordered.sort_values(by='Run', inplace=True)
+ return ordered
diff --git a/mesa/time.py b/mesa/time.py
index 75bac083..ad5af978 100644
--- a/mesa/time.py
+++ b/mesa/time.py
@@ -139,7 +139,7 @@ class StagedActivation(BaseScheduler):
shuffle_between_stages = False
stage_time = 1
- def __init__(self, model, stage_list=["step"], shuffle=False,
+ def __init__(self, model, stage_list=None, shuffle=False,
shuffle_between_stages=False):
""" Create an empty Staged Activation schedule.
@@ -154,7 +154,7 @@ class StagedActivation(BaseScheduler):
"""
super().__init__(model)
- self.stage_list = stage_list
+ self.stage_list = stage_list or ["step"]
self.shuffle = shuffle
self.shuffle_between_stages = shuffle_between_stages
self.stage_time = 1 / len(self.stage_list)
| Passing dictionary argument to BatchRunner
Not sure whether this is a bug or intended behaviour, but I decided to report it anyway.
When you pass a dictionary value into BatchRunner, it only takes the keys and not the values, so you will not get a cartesian product of your values. For example, if you pass something like this:
``` python
class MoneyModel(Model):
def __init__(self, n, grid_params): # grid_params is intended to be a dictionary
super().__init__(self)
...
def main():
...
n_iters, n_steps = 5, 200
params = {
"grid_params": {"height": 10, "width": 10},
"n": range(10, 500, 10)}
batch = BatchRunner(MoneyModel, params,
iterations=n_iters, max_steps=n_steps,
model_reporters={"Gini": compute_gini})
batch.run_all()
```
You'll hit the issue here: https://github.com/projectmesa/mesa/blob/master/mesa/batchrunner.py#L71, i.e. the variable `kwargs` will be equal to `{'grid_params': 'width', 'n': 10}` (or any other dictionary key) instead of the original value.
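To make the failure mode concrete, here is a minimal standalone demonstration (not taken from the repository) of why expanding the parameter values with `product()` flattens a dictionary parameter into its keys, and how wrapping mapping values in a one-element tuple, which is what the `combinations` helper in the patch above does, restores the intended behaviour. The snippet uses `collections.abc.Mapping` so it runs on current Python, whereas the patch itself imports `collections.Mapping`.
```python
from collections.abc import Mapping
from itertools import product

params = {"grid_params": {"height": 10, "width": 10}, "n": [10, 20]}

# Iterating a dict yields its keys, so product() sees 'height'/'width'
# instead of the dict itself:
print(list(product(*params.values())))
# [('height', 10), ('height', 20), ('width', 10), ('width', 20)]

# Wrapping mapping values in a one-element tuple keeps them intact:
prepared = [(v,) if isinstance(v, Mapping) else v for v in params.values()]
print(list(product(*prepared)))
# [({'height': 10, 'width': 10}, 10), ({'height': 10, 'width': 10}, 20)]
```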
| projectmesa/mesa | diff --git a/tests/test_batchrunner.py b/tests/test_batchrunner.py
index 2e9242c4..e9b9904b 100644
--- a/tests/test_batchrunner.py
+++ b/tests/test_batchrunner.py
@@ -1,27 +1,28 @@
"""
Test the BatchRunner
"""
+from functools import reduce
+from operator import mul
import unittest
from mesa import Agent, Model
-from mesa.batchrunner import BatchRunner
from mesa.time import BaseScheduler
+from mesa.batchrunner import BatchRunner
+
NUM_AGENTS = 7
class MockAgent(Agent):
"""
- Minimalistic model for testing purposes
+ Minimalistic agent implementation for testing purposes
"""
- def __init__(self, unique_id, val):
+ def __init__(self, unique_id, model, val):
+ super().__init__(unique_id, model)
self.unique_id = unique_id
self.val = val
def step(self):
- """
- increment val by 1
- """
self.val += 1
@@ -29,18 +30,34 @@ class MockModel(Model):
"""
Minimalistic model for testing purposes
"""
- def __init__(self, model_param, agent_param):
- """
- Args:
- model_param (any): parameter specific to the model
- agent_param (int): parameter specific to the agent
- """
- self.schedule = BaseScheduler(None)
- self.model_param = model_param
+ def __init__(self, variable_model_param, variable_agent_param,
+ fixed_model_param=None, schedule=None, **kwargs):
+ super().__init__()
+ self.schedule = BaseScheduler(None) if schedule is None else schedule
+ self.variable_model_param = variable_model_param
+ self.variable_agent_param = variable_agent_param
+ self.fixed_model_param = fixed_model_param
+ self.n_agents = kwargs.get('n_agents', NUM_AGENTS)
+ self.running = True
+ self.init_agents()
+
+ def init_agents(self):
+ for i in range(self.n_agents):
+ self.schedule.add(MockAgent(i, self, self.variable_agent_param))
+
+ def step(self):
+ self.schedule.step()
+
+
+class MockMixedModel(Model):
+
+ def __init__(self, **other_params):
+ super().__init__()
+ self.variable_name = other_params.get('variable_name', 42)
+ self.fixed_name = other_params.get('fixed_name')
self.running = True
- for i in range(NUM_AGENTS):
- a = MockAgent(i, agent_param)
- self.schedule.add(a)
+ self.schedule = BaseScheduler(None)
+ self.schedule.add(MockAgent(1, self, 0))
def step(self):
self.schedule.step()
@@ -51,44 +68,95 @@ class TestBatchRunner(unittest.TestCase):
Test that BatchRunner is running batches
"""
def setUp(self):
- """
- Create the model and run it for some steps
- """
- self.model_reporter = {"model": lambda m: m.model_param}
- self.agent_reporter = {
+ self.mock_model = MockModel
+ self.model_reporters = {
+ "reported_variable_value": lambda m: m.variable_model_param,
+ "reported_fixed_value": lambda m: m.fixed_model_param
+ }
+ self.agent_reporters = {
"agent_id": lambda a: a.unique_id,
- "agent_val": lambda a: a.val}
- self.params = {
- 'model_param': range(3),
- 'agent_param': [1, 8],
+ "agent_val": lambda a: a.val
+ }
+ self.variable_params = {
+ "variable_model_param": range(3),
+ "variable_agent_param": [1, 8]
}
+ self.fixed_params = None
self.iterations = 17
- self.batch = BatchRunner(
- MockModel,
- self.params,
+ self.max_steps = 3
+
+ def launch_batch_processing(self):
+ batch = BatchRunner(
+ self.mock_model,
+ variable_parameters=self.variable_params,
+ fixed_parameters=self.fixed_params,
iterations=self.iterations,
- max_steps=3,
- model_reporters=self.model_reporter,
- agent_reporters=self.agent_reporter)
- self.batch.run_all()
+ max_steps=self.max_steps,
+ model_reporters=self.model_reporters,
+ agent_reporters=self.agent_reporters)
+ batch.run_all()
+ return batch
+
+ @property
+ def model_runs(self):
+ """
+ Returns total number of batch runner's iterations.
+ """
+ return (reduce(mul, map(len, self.variable_params.values())) *
+ self.iterations)
def test_model_level_vars(self):
"""
Test that model-level variable collection is of the correct size
"""
- model_vars = self.batch.get_model_vars_dataframe()
- rows = len(self.params['model_param']) * \
- len(self.params['agent_param']) * \
- self.iterations
- assert model_vars.shape == (rows, 4)
+ batch = self.launch_batch_processing()
+ model_vars = batch.get_model_vars_dataframe()
+ expected_cols = (len(self.variable_params) +
+ len(self.model_reporters) +
+ 1) # extra column with run index
+
+ self.assertEqual(model_vars.shape, (self.model_runs, expected_cols))
def test_agent_level_vars(self):
"""
Test that agent-level variable collection is of the correct size
"""
- agent_vars = self.batch.get_agent_vars_dataframe()
- rows = NUM_AGENTS * \
- len(self.params['agent_param']) * \
- len(self.params['model_param']) * \
- self.iterations
- assert agent_vars.shape == (rows, 6)
+ batch = self.launch_batch_processing()
+ agent_vars = batch.get_agent_vars_dataframe()
+ expected_cols = (len(self.variable_params) +
+ len(self.agent_reporters) +
+ 2) # extra columns with run index and agentId
+
+ self.assertEqual(agent_vars.shape,
+ (self.model_runs * NUM_AGENTS, expected_cols))
+
+ def test_model_with_fixed_parameters_as_kwargs(self):
+ """
+ Test that model with fixed parameters passed like kwargs is
+ properly handled
+ """
+ self.fixed_params = {'fixed_model_param': 'Fixed', 'n_agents': 1}
+ batch = self.launch_batch_processing()
+ model_vars = batch.get_model_vars_dataframe()
+ agent_vars = batch.get_agent_vars_dataframe()
+
+ self.assertEqual(len(model_vars), len(agent_vars))
+ self.assertEqual(len(model_vars), self.model_runs)
+ self.assertEqual(model_vars['reported_fixed_value'].unique(), ['Fixed'])
+
+ def test_model_with_variable_and_fixed_kwargs(self):
+ self.mock_model = MockMixedModel
+ self.model_reporters = {
+ 'reported_fixed_param': lambda m: m.fixed_name,
+ 'reported_variable_param': lambda m: m.variable_name
+ }
+ self.fixed_params = {'fixed_name': 'Fixed'}
+ self.variable_params = {'variable_name': [1, 2, 3]}
+ batch = self.launch_batch_processing()
+ model_vars = batch.get_model_vars_dataframe()
+ expected_cols = (len(self.variable_params) +
+ len(self.model_reporters) +
+ 1)
+ self.assertEqual(model_vars.shape, (self.model_runs, expected_cols))
+ self.assertEqual(model_vars['reported_fixed_param'].iloc[0],
+ self.fixed_params['fixed_name'])
diff --git a/tests/test_visualization.py b/tests/test_visualization.py
index 553b68e2..e46f78dc 100644
--- a/tests/test_visualization.py
+++ b/tests/test_visualization.py
@@ -24,7 +24,7 @@ class MockModel(Model):
self.grid = Grid(width, height, torus=True)
for (c, x, y) in self.grid.coord_iter():
- a = MockAgent(x + y * 100, x * y * 3)
+ a = MockAgent(x + y * 100, self, x * y * 3)
self.grid.place_agent(a, (x, y))
self.schedule.add(a)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": -1,
"issue_text_score": 0,
"test_score": -1
},
"num_modified_files": 6
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/projectmesa/mesa.git@5a3a62c6dd9dcb3b310a5526d37dc4c7bcfc43c0#egg=Mesa
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
| name: mesa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- numpy==2.0.2
- pandas==2.2.3
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tornado==6.4.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
prefix: /opt/conda/envs/mesa
| [
"tests/test_batchrunner.py::TestBatchRunner::test_agent_level_vars",
"tests/test_batchrunner.py::TestBatchRunner::test_model_level_vars",
"tests/test_batchrunner.py::TestBatchRunner::test_model_with_fixed_parameters_as_kwargs",
"tests/test_batchrunner.py::TestBatchRunner::test_model_with_variable_and_fixed_kwargs"
]
| []
| [
"tests/test_visualization.py::TestModularServer::test_canvas_render_model_state",
"tests/test_visualization.py::TestModularServer::test_text_render_model_state",
"tests/test_visualization.py::TestModularServer::test_user_params"
]
| []
| Apache License 2.0 | 1,441 | [
"examples/Schelling/analysis.ipynb",
"docs/tutorials/intro_tutorial.ipynb",
"docs/tutorials/intro_tutorial.rst",
"examples/forest_fire/Forest Fire Model.ipynb",
"mesa/batchrunner.py",
"examples/forest_fire/Forest",
"mesa/time.py"
]
| [
"examples/Schelling/analysis.ipynb",
"docs/tutorials/intro_tutorial.ipynb",
"docs/tutorials/intro_tutorial.rst",
"examples/forest_fire/Forest Fire Model.ipynb",
"mesa/batchrunner.py",
"re",
"mesa/time.py"
]
|
|
rorodata__firefly-31 | e87d457f64da90c4993b19c9fa47c1393baec891 | 2017-07-08 19:54:09 | e87d457f64da90c4993b19c9fa47c1393baec891 | diff --git a/firefly/app.py b/firefly/app.py
index b53fced..3a22377 100644
--- a/firefly/app.py
+++ b/firefly/app.py
@@ -79,7 +79,11 @@ class FireflyFunction(object):
if self.options.get("internal", False):
return self.make_response(self.function())
- kwargs = self.get_inputs(request)
+ try:
+ kwargs = self.get_inputs(request)
+ except ValueError as err:
+ return self.make_response({"error": str(err)}, status=400)
+
try:
validate_args(self.function, kwargs)
except ValidationError as err:
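The hunk above converts a JSON decoding failure (the `ValueError` raised while reading the request inputs) into a 400 response instead of letting it escape as a 500; the issue below describes the motivating behaviour. A framework-agnostic sketch of the same pattern follows; the function name and the (status, payload) return convention are illustrative, not firefly's actual API.
```python
import json

def parse_json_body(body):
    # Report malformed JSON as a client error rather than crashing the handler.
    try:
        return 200, json.loads(body)
    except ValueError as err:  # json.JSONDecodeError subclasses ValueError
        return 400, {"error": str(err)}

print(parse_json_body('{"a": [5 8]}'))   # (400, {'error': "Expecting ',' delimiter: ..."})
print(parse_json_body('{"a": [5, 8]}'))  # (200, {'a': [5, 8]})
```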
| Incorrect HTTP Error Codes
When a request is made to a firefly server with data in an invalid format, like:
```
curl -i -d '{"a": [5 8]}' https://0.0.0.0:8080/predict
```
instead of
```
curl -i -d '{"a": [5, 8]}' https://0.0.0.0:8080/predict
```
the server returns `500 Internal Server Error`. But in such cases, the server should return `400 Bad Request` or `422 Unprocessable Entity` as the payload data is not in the required format. | rorodata/firefly | diff --git a/tests/test_app.py b/tests/test_app.py
index 687cdc7..ab3db5c 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -88,6 +88,14 @@ class TestFireflyFunction:
assert response.status == '200 OK'
assert response.text == '9'
+ def test_call_for_bad_request(self):
+ def sum(a):
+ return sum(a)
+ func = FireflyFunction(sum)
+ request = Request.blank("/sum", POST='{"a": [3 8]}')
+ response = func(request)
+ assert response.status == '400 Bad Request'
+
@py2_only
def test_generate_signature(self):
def sample_function(x, one="hey", two=None, **kwargs):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=3.1.1",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
chardet==3.0.4
-e git+https://github.com/rorodata/firefly.git@e87d457f64da90c4993b19c9fa47c1393baec891#egg=Firefly
gunicorn==19.7.1
idna==2.5
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.18.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.21.1
WebOb==1.7.2
zipp==3.6.0
| name: firefly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- chardet==3.0.4
- gunicorn==19.7.1
- idna==2.5
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.18.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.21.1
- webob==1.7.2
- zipp==3.6.0
prefix: /opt/conda/envs/firefly
| [
"tests/test_app.py::TestFireflyFunction::test_call_for_bad_request"
]
| []
| [
"tests/test_app.py::TestFirefly::test_generate_function_list",
"tests/test_app.py::TestFirefly::test_generate_function_list_for_func_name",
"tests/test_app.py::TestFirefly::test_function_call",
"tests/test_app.py::TestFirefly::test_auth_failure",
"tests/test_app.py::TestFirefly::test_http_error_404",
"tests/test_app.py::TestFireflyFunction::test_call",
"tests/test_app.py::TestFireflyFunction::test_generate_signature_py3"
]
| []
| Apache License 2.0 | 1,442 | [
"firefly/app.py"
]
| [
"firefly/app.py"
]
|
|
pre-commit__pre-commit-556 | 853cbecd4e15aeb59e4730320dc90fe16afa219e | 2017-07-09 03:28:10 | ce7481f75b3ece0d6d88a04f62a4c51665e0efb8 | asottile: test failures are expected, I'll fix those up | diff --git a/pre_commit/languages/all.py b/pre_commit/languages/all.py
index f441ddd..5546025 100644
--- a/pre_commit/languages/all.py
+++ b/pre_commit/languages/all.py
@@ -10,16 +10,18 @@ from pre_commit.languages import script
from pre_commit.languages import swift
from pre_commit.languages import system
-# A language implements the following constant and two functions in its module:
+# A language implements the following constant and functions in its module:
#
# # Use None for no environment
# ENVIRONMENT_DIR = 'foo_env'
#
-# def install_environment(
-# repo_cmd_runner,
-# version='default',
-# additional_dependencies=(),
-# ):
+# def get_default_version():
+# """Return a value to replace the 'default' value for language_version.
+#
+# return 'default' if there is no better option.
+# """
+#
+# def install_environment(repo_cmd_runner, version, additional_dependencies):
# """Installs a repository in the given repository. Note that the current
# working directory will already be inside the repository.
#
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
index 7d3f8d0..59dc1b4 100644
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -14,6 +14,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'docker'
PRE_COMMIT_LABEL = 'PRE_COMMIT'
+get_default_version = helpers.basic_get_default_version
def md5(s): # pragma: windows no cover
@@ -55,9 +56,7 @@ def build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
assert repo_cmd_runner.exists('Dockerfile'), (
'No Dockerfile was found in the hook repository'
diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py
index c0bfbcb..ee04ca7 100644
--- a/pre_commit/languages/golang.py
+++ b/pre_commit/languages/golang.py
@@ -14,6 +14,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'golangenv'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv):
@@ -44,11 +45,7 @@ def guess_go_dir(remote_url):
return 'unknown_src_dir'
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
helpers.assert_version_default('golang', version)
directory = repo_cmd_runner.path(
helpers.environment_dir(ENVIRONMENT_DIR, 'default'),
diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py
index a6c93de..6af77e3 100644
--- a/pre_commit/languages/helpers.py
+++ b/pre_commit/languages/helpers.py
@@ -33,3 +33,7 @@ def assert_no_additional_deps(lang, additional_deps):
'For now, pre-commit does not support '
'additional_dependencies for {}'.format(lang),
)
+
+
+def basic_get_default_version():
+ return 'default'
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
index ef557a1..b5f7c56 100644
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -12,6 +12,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'node_env'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv): # pragma: windows no cover
@@ -34,9 +35,7 @@ def in_env(repo_cmd_runner, language_version): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
additional_dependencies = tuple(additional_dependencies)
assert repo_cmd_runner.exists('package.json')
diff --git a/pre_commit/languages/pcre.py b/pre_commit/languages/pcre.py
index 314ea09..faba539 100644
--- a/pre_commit/languages/pcre.py
+++ b/pre_commit/languages/pcre.py
@@ -2,18 +2,16 @@ from __future__ import unicode_literals
import sys
+from pre_commit.languages import helpers
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
GREP = 'ggrep' if sys.platform == 'darwin' else 'grep'
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for pcre type is a noop."""
raise AssertionError('Cannot install pcre repo.')
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py
index 634abe5..715d585 100644
--- a/pre_commit/languages/python.py
+++ b/pre_commit/languages/python.py
@@ -1,7 +1,6 @@
from __future__ import unicode_literals
import contextlib
-import distutils.spawn
import os
import sys
@@ -9,11 +8,13 @@ from pre_commit.envcontext import envcontext
from pre_commit.envcontext import UNSET
from pre_commit.envcontext import Var
from pre_commit.languages import helpers
+from pre_commit.parse_shebang import find_executable
from pre_commit.util import clean_path_on_failure
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'py_env'
+get_default_version = helpers.basic_get_default_version
def bin_dir(venv):
@@ -39,10 +40,53 @@ def in_env(repo_cmd_runner, language_version):
yield
+def _get_default_version(): # pragma: no cover (platform dependent)
+ def _norm(path):
+ _, exe = os.path.split(path.lower())
+ exe, _, _ = exe.partition('.exe')
+ if find_executable(exe) and exe not in {'python', 'pythonw'}:
+ return exe
+
+ # First attempt from `sys.executable` (or the realpath)
+ # On linux, I see these common sys.executables:
+ #
+ # system `python`: /usr/bin/python -> python2.7
+ # system `python2`: /usr/bin/python2 -> python2.7
+ # virtualenv v: v/bin/python (will not return from this loop)
+ # virtualenv v -ppython2: v/bin/python -> python2
+ # virtualenv v -ppython2.7: v/bin/python -> python2.7
+ # virtualenv v -ppypy: v/bin/python -> v/bin/pypy
+ for path in {sys.executable, os.path.realpath(sys.executable)}:
+ exe = _norm(path)
+ if exe:
+ return exe
+
+ # Next try the `pythonX.X` executable
+ exe = 'python{}.{}'.format(*sys.version_info)
+ if find_executable(exe):
+ return exe
+
+ # Give a best-effort try for windows
+ if os.path.exists(r'C:\{}\python.exe'.format(exe.replace('.', ''))):
+ return exe
+
+ # We tried!
+ return 'default'
+
+
+def get_default_version():
+ # TODO: when dropping python2, use `functools.lru_cache(maxsize=1)`
+ try:
+ return get_default_version.cached_version
+ except AttributeError:
+ get_default_version.cached_version = _get_default_version()
+ return get_default_version()
+
+
def norm_version(version):
if os.name == 'nt': # pragma: no cover (windows)
# Try looking up by name
- if distutils.spawn.find_executable(version):
+ if find_executable(version) and find_executable(version) != version:
return version
# If it is in the form pythonx.x search in the default
@@ -54,11 +98,7 @@ def norm_version(version):
return os.path.expanduser(version)
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
index d3896d9..26e303c 100644
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -16,6 +16,7 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'rbenv'
+get_default_version = helpers.basic_get_default_version
def get_env_patch(venv, language_version): # pragma: windows no cover
@@ -97,9 +98,7 @@ def _install_ruby(runner, version): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
diff --git a/pre_commit/languages/script.py b/pre_commit/languages/script.py
index 762ae76..c4b6593 100644
--- a/pre_commit/languages/script.py
+++ b/pre_commit/languages/script.py
@@ -5,13 +5,10 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for script type is a noop."""
raise AssertionError('Cannot install script repo.')
diff --git a/pre_commit/languages/swift.py b/pre_commit/languages/swift.py
index 4d171c5..a27dfac 100644
--- a/pre_commit/languages/swift.py
+++ b/pre_commit/languages/swift.py
@@ -10,6 +10,7 @@ from pre_commit.util import clean_path_on_failure
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = 'swift_env'
+get_default_version = helpers.basic_get_default_version
BUILD_DIR = '.build'
BUILD_CONFIG = 'release'
@@ -29,9 +30,7 @@ def in_env(repo_cmd_runner): # pragma: windows no cover
def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
+ repo_cmd_runner, version, additional_dependencies,
): # pragma: windows no cover
helpers.assert_version_default('swift', version)
helpers.assert_no_additional_deps('swift', additional_dependencies)
diff --git a/pre_commit/languages/system.py b/pre_commit/languages/system.py
index c9e1c5d..3148079 100644
--- a/pre_commit/languages/system.py
+++ b/pre_commit/languages/system.py
@@ -5,13 +5,10 @@ from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
+get_default_version = helpers.basic_get_default_version
-def install_environment(
- repo_cmd_runner,
- version='default',
- additional_dependencies=(),
-):
+def install_environment(repo_cmd_runner, version, additional_dependencies):
"""Installation for system type is a noop."""
raise AssertionError('Cannot install system repo.')
diff --git a/pre_commit/manifest.py b/pre_commit/manifest.py
index 888ad6d..081f3c6 100644
--- a/pre_commit/manifest.py
+++ b/pre_commit/manifest.py
@@ -7,6 +7,7 @@ from cached_property import cached_property
import pre_commit.constants as C
from pre_commit.clientlib import load_manifest
+from pre_commit.languages.all import languages
logger = logging.getLogger('pre_commit')
@@ -38,4 +39,10 @@ class Manifest(object):
@cached_property
def hooks(self):
- return {hook['id']: hook for hook in self.manifest_contents}
+ ret = {}
+ for hook in self.manifest_contents:
+ if hook['language_version'] == 'default':
+ language = languages[hook['language']]
+ hook['language_version'] = language.get_default_version()
+ ret[hook['id']] = hook
+ return ret
| Detect the python version when creating `default` py_envs
Given two separate virtualenvs, one running with python3 and the other running with python2, and each with the appropriate pre-commit installed (`pip install pre-commit` in the first, `pip3 install pre-commit` in the second), the check-ast plugin fails to parse Python 2 (or 3) syntax correctly, depending on which virtualenv pre-commit was installed in first.
Suggestions:
- the ~/.pre-commit cache should be a per-virtualenv cache rather than a per-user one.
- instead of maintaining the cache, consider just using pip dependencies.
Example failure in Python 2 venv if Python 3 venv installs pre-commit first:
```
p2/main.py: failed parsing with python3.4:
Traceback (most recent call last):
File "/home/ubuntu/.pre-commit/repo_3mr61_f/py_env-default/lib/python3.4/site-packages/pre_commit_hooks/check_ast.py", line 23, in check_ast
ast.parse(open(filename, 'rb').read(), filename=filename)
File "/usr/lib/python3.4/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "p2/main.py", line 178
except socket.error, e:
^
SyntaxError: invalid syntax
```
Example failure in Python 3 venv if Python 2 venv installs pre-commit first:
```
p3/__main__.py: failed parsing with python:
Traceback (most recent call last):
File "/home/ubuntu/.pre-commit/repoCzM4lg/py_env-default/local/lib/python2.7/site-packages/pre_commit_hooks/check_ast.py", line 23, in check_ast
ast.parse(open(filename, 'rb').read(), filename=filename)
File "/usr/lib/python2.7/ast.py", line 37, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "p3/__main__.py", line 30
"c=%s" % args.c, file=sys.stderr)
^
SyntaxError: invalid syntax
```
`rm -fr ~/.pre-commit` was run between each test.
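The patch above addresses this by replacing the literal `default` language_version with a concrete interpreter name derived from the interpreter pre-commit itself runs under, so the cached environment directory (for example `py_env-python2.7` versus `py_env-python3.4`) is keyed per interpreter. A simplified, standalone sketch of that detection idea, using `shutil.which` instead of pre-commit's own `find_executable` and omitting the Windows fallback, might look like this:
```python
import os
import sys
from shutil import which

def guess_default_python():
    # Prefer a versioned executable name derived from the running interpreter,
    # e.g. 'python2.7' or 'python3.6', so each interpreter gets its own env dir.
    for path in {sys.executable, os.path.realpath(sys.executable)}:
        exe = os.path.basename(path).lower()
        if which(exe) and exe not in {'python', 'pythonw'}:
            return exe
    exe = 'python{}.{}'.format(*sys.version_info)
    return exe if which(exe) else 'default'

print(guess_default_python())  # e.g. 'python3.6' inside a python3 virtualenv
```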
| pre-commit/pre-commit | diff --git a/tests/languages/all_test.py b/tests/languages/all_test.py
index 73b89cb..dd1ed27 100644
--- a/tests/languages/all_test.py
+++ b/tests/languages/all_test.py
@@ -12,9 +12,7 @@ from pre_commit.languages.all import languages
def test_install_environment_argspec(language):
expected_argspec = inspect.ArgSpec(
args=['repo_cmd_runner', 'version', 'additional_dependencies'],
- varargs=None,
- keywords=None,
- defaults=('default', ()),
+ varargs=None, keywords=None, defaults=None,
)
argspec = inspect.getargspec(languages[language].install_environment)
assert argspec == expected_argspec
@@ -33,3 +31,12 @@ def test_run_hook_argpsec(language):
)
argspec = inspect.getargspec(languages[language].run_hook)
assert argspec == expected_argspec
+
+
[email protected]('language', all_languages)
+def test_get_default_version_argspec(language):
+ expected_argspec = inspect.ArgSpec(
+ args=[], varargs=None, keywords=None, defaults=None,
+ )
+ argspec = inspect.getargspec(languages[language].get_default_version)
+ assert argspec == expected_argspec
diff --git a/tests/manifest_test.py b/tests/manifest_test.py
index 7db886c..ada004f 100644
--- a/tests/manifest_test.py
+++ b/tests/manifest_test.py
@@ -11,8 +11,7 @@ from testing.util import get_head_sha
@pytest.yield_fixture
def manifest(store, tempdir_factory):
path = make_repo(tempdir_factory, 'script_hooks_repo')
- head_sha = get_head_sha(path)
- repo_path = store.clone(path, head_sha)
+ repo_path = store.clone(path, get_head_sha(path))
yield Manifest(repo_path, path)
@@ -76,3 +75,13 @@ def test_legacy_manifest_warn(store, tempdir_factory, log_warning_mock):
'If `pre-commit autoupdate` does not silence this warning consider '
'making an issue / pull request.'.format(path)
)
+
+
+def test_default_python_language_version(store, tempdir_factory):
+ path = make_repo(tempdir_factory, 'python_hooks_repo')
+ repo_path = store.clone(path, get_head_sha(path))
+ manifest = Manifest(repo_path, path)
+
+ # This assertion is difficult as it is version dependent, just assert
+ # that it is *something*
+ assert manifest.hooks['foo']['language_version'] != 'default'
diff --git a/tests/repository_test.py b/tests/repository_test.py
index f91642e..7131d75 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -442,7 +442,7 @@ def test_venvs(tempdir_factory, store):
config = make_config_from_repo(path)
repo = Repository.create(config, store)
venv, = repo._venvs
- assert venv == (mock.ANY, 'python', 'default', [])
+ assert venv == (mock.ANY, 'python', python.get_default_version(), [])
@pytest.mark.integration
@@ -452,7 +452,7 @@ def test_additional_dependencies(tempdir_factory, store):
config['hooks'][0]['additional_dependencies'] = ['pep8']
repo = Repository.create(config, store)
venv, = repo._venvs
- assert venv == (mock.ANY, 'python', 'default', ['pep8'])
+ assert venv == (mock.ANY, 'python', python.get_default_version(), ['pep8'])
@pytest.mark.integration
@@ -591,7 +591,8 @@ def test_control_c_control_c_on_install(tempdir_factory, store):
repo.run_hook(hook, [])
# Should have made an environment, however this environment is broken!
- assert os.path.exists(repo._cmd_runner.path('py_env-default'))
+ envdir = 'py_env-{}'.format(python.get_default_version())
+ assert repo._cmd_runner.exists(envdir)
# However, it should be perfectly runnable (reinstall after botched
# install)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 12
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
cached-property==2.0.1
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
mock==5.2.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/pre-commit/pre-commit.git@853cbecd4e15aeb59e4730320dc90fe16afa219e#egg=pre_commit
pycodestyle==2.13.0
pyflakes==3.3.1
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
six==1.17.0
tomli==2.2.1
virtualenv==20.29.3
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- cached-property==2.0.1
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- setuptools==18.4
- six==1.17.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/pre-commit
| [
"tests/languages/all_test.py::test_install_environment_argspec[docker]",
"tests/languages/all_test.py::test_install_environment_argspec[golang]",
"tests/languages/all_test.py::test_install_environment_argspec[node]",
"tests/languages/all_test.py::test_install_environment_argspec[pcre]",
"tests/languages/all_test.py::test_install_environment_argspec[python]",
"tests/languages/all_test.py::test_install_environment_argspec[ruby]",
"tests/languages/all_test.py::test_install_environment_argspec[script]",
"tests/languages/all_test.py::test_install_environment_argspec[swift]",
"tests/languages/all_test.py::test_install_environment_argspec[system]",
"tests/languages/all_test.py::test_get_default_version_argspec[docker]",
"tests/languages/all_test.py::test_get_default_version_argspec[golang]",
"tests/languages/all_test.py::test_get_default_version_argspec[node]",
"tests/languages/all_test.py::test_get_default_version_argspec[pcre]",
"tests/languages/all_test.py::test_get_default_version_argspec[python]",
"tests/languages/all_test.py::test_get_default_version_argspec[ruby]",
"tests/languages/all_test.py::test_get_default_version_argspec[script]",
"tests/languages/all_test.py::test_get_default_version_argspec[swift]",
"tests/languages/all_test.py::test_get_default_version_argspec[system]",
"tests/manifest_test.py::test_default_python_language_version",
"tests/repository_test.py::test_venvs",
"tests/repository_test.py::test_additional_dependencies",
"tests/repository_test.py::test_control_c_control_c_on_install"
]
| [
"tests/repository_test.py::test_switch_language_versions_doesnt_clobber",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_run_ruby_hook_with_disable_shared_gems",
"tests/repository_test.py::test_golang_hook",
"tests/repository_test.py::test_additional_ruby_dependencies_installed",
"tests/repository_test.py::test_additional_golang_dependencies_installed"
]
| [
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[docker]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[golang]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[node]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[pcre]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[python]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[ruby]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[script]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[swift]",
"tests/languages/all_test.py::test_ENVIRONMENT_DIR[system]",
"tests/languages/all_test.py::test_run_hook_argpsec[docker]",
"tests/languages/all_test.py::test_run_hook_argpsec[golang]",
"tests/languages/all_test.py::test_run_hook_argpsec[node]",
"tests/languages/all_test.py::test_run_hook_argpsec[pcre]",
"tests/languages/all_test.py::test_run_hook_argpsec[python]",
"tests/languages/all_test.py::test_run_hook_argpsec[ruby]",
"tests/languages/all_test.py::test_run_hook_argpsec[script]",
"tests/languages/all_test.py::test_run_hook_argpsec[swift]",
"tests/languages/all_test.py::test_run_hook_argpsec[system]",
"tests/manifest_test.py::test_manifest_contents",
"tests/manifest_test.py::test_hooks",
"tests/manifest_test.py::test_legacy_manifest_warn",
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_python_hook_weird_setup_cfg",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_repo_with_legacy_hooks_yaml",
"tests/repository_test.py::test_missing_executable",
"tests/repository_test.py::test_missing_pcre_support",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_run_hook_with_curly_braced_arguments",
"tests/repository_test.py::test_pcre_hook_no_match",
"tests/repository_test.py::test_pcre_hook_matching",
"tests/repository_test.py::test_pcre_hook_case_insensitive_option",
"tests/repository_test.py::test_pcre_many_files",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_additional_dependencies_duplicated",
"tests/repository_test.py::test_additional_python_dependencies_installed",
"tests/repository_test.py::test_additional_dependencies_roll_forward",
"tests/repository_test.py::test_additional_node_dependencies_installed",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories",
"tests/repository_test.py::test_local_repository",
"tests/repository_test.py::test_local_python_repo",
"tests/repository_test.py::test_hook_id_not_present",
"tests/repository_test.py::test_too_new_version",
"tests/repository_test.py::test_versions_ok[0.1.0]",
"tests/repository_test.py::test_versions_ok[0.15.0]"
]
| []
| MIT License | 1,443 | [
"pre_commit/languages/swift.py",
"pre_commit/languages/golang.py",
"pre_commit/languages/pcre.py",
"pre_commit/languages/helpers.py",
"pre_commit/languages/node.py",
"pre_commit/languages/ruby.py",
"pre_commit/languages/all.py",
"pre_commit/languages/system.py",
"pre_commit/manifest.py",
"pre_commit/languages/script.py",
"pre_commit/languages/docker.py",
"pre_commit/languages/python.py"
]
| [
"pre_commit/languages/swift.py",
"pre_commit/languages/golang.py",
"pre_commit/languages/pcre.py",
"pre_commit/languages/helpers.py",
"pre_commit/languages/node.py",
"pre_commit/languages/ruby.py",
"pre_commit/languages/all.py",
"pre_commit/languages/system.py",
"pre_commit/manifest.py",
"pre_commit/languages/script.py",
"pre_commit/languages/docker.py",
"pre_commit/languages/python.py"
]
|
lig__pyventory-10 | 1630f08e58c95e8a50c0256d13034022e4a75067 | 2017-07-10 15:39:16 | 1630f08e58c95e8a50c0256d13034022e4a75067 | diff --git a/.gitignore b/.gitignore
index 80f81e0..04ae275 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,8 @@
!.gitignore
*~
*.pyc
-*.egg-info/
+
+/venv
+/build
+/dist
+/*.egg-info
diff --git a/pyventory/asset.py b/pyventory/asset.py
index d244bb8..03c31ec 100644
--- a/pyventory/asset.py
+++ b/pyventory/asset.py
@@ -2,7 +2,7 @@ from collections import OrderedDict, Mapping, Sequence
import six
-from pyventory.errors import ValueSubstitutionError
+from pyventory import errors
__all__ = ['Asset']
@@ -33,11 +33,21 @@ class Asset(object):
if not attr_name.startswith('_'))
for name, value in _vars.copy().items():
+
+ if value is NotImplemented:
+ if strict_format:
+ raise errors.PropertyIsNotImplementedError(
+ 'Var "{}" is not implemented in "{}" asset instance',
+ name, obj._name())
+ else:
+ del _vars[name]
+ continue
+
try:
_vars[name] = cls.__format_value(value, _vars)
except KeyError as e:
if strict_format:
- raise ValueSubstitutionError(
+ raise errors.ValueSubstitutionError(
'Var "{}" must be available for "{}" asset instance',
e.args[0], obj._name())
else:
diff --git a/pyventory/errors.py b/pyventory/errors.py
index 3616bd4..c83d2cf 100644
--- a/pyventory/errors.py
+++ b/pyventory/errors.py
@@ -12,3 +12,7 @@ class PyventoryError(Exception):
class ValueSubstitutionError(PyventoryError):
pass
+
+
+class PropertyIsNotImplementedError(PyventoryError):
+ pass
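A short usage sketch of the behaviour requested in the issue below and implemented by the patch above; the class names and the `ssh_user` attribute are illustrative, while the public `Asset`/`ansible_inventory`/`errors` API mirrors what the tests further down exercise.
```python
from pyventory import Asset, ansible_inventory, errors

class WebServer(Asset):
    ssh_user = NotImplemented      # abstract value: concrete hosts must override it

class FrontendServer(WebServer):
    ssh_user = 'deploy'            # overridden, so it appears in the host vars

class BackendServer(WebServer):
    pass                           # never overridden

frontend = FrontendServer()
ansible_inventory(locals())        # fine: the WebServer group simply omits ssh_user

backend = BackendServer()
try:
    ansible_inventory(locals())    # the backend host never resolves ssh_user
except errors.PropertyIsNotImplementedError as exc:
    print(exc)
```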
| NotImplemented as property value
Allow defining `NotImplemented` as an Asset property value.
Do not include such a property in group vars if not overridden.
Raise an exception if the value wasn't overridden in the host vars. | lig/pyventory | diff --git a/tests/unit/test_inventory.py b/tests/unit/test_inventory.py
index 7bf24ef..356ddbe 100644
--- a/tests/unit/test_inventory.py
+++ b/tests/unit/test_inventory.py
@@ -1,8 +1,7 @@
import pytest
import six
-from pyventory import Asset, ansible_inventory
-from pyventory.errors import ValueSubstitutionError
+from pyventory import Asset, ansible_inventory, errors
def test_allow_mixins_for_inventory_items():
@@ -209,7 +208,7 @@ def test_require_arguments_for_format_strings():
test_asset = TestAsset()
- with pytest.raises(ValueSubstitutionError):
+ with pytest.raises(errors.ValueSubstitutionError):
ansible_inventory(locals())
@@ -398,3 +397,57 @@ def test_multiple_children():
}
}
}'''
+
+
+def test_allow_notimplemented_value():
+
+ class BaseTestAsset(Asset):
+ foo = NotImplemented
+
+ class TestAsset(BaseTestAsset):
+ foo = 'bar'
+
+ test_asset = TestAsset()
+
+ result = six.StringIO()
+ ansible_inventory(locals(), out=result, indent=4)
+
+ # hack for py27 `json.dump()` behavior
+ result = '\n'.join([x.rstrip() for x in result.getvalue().split('\n')])
+
+ assert result == '''{
+ "BaseTestAsset": {
+ "children": [
+ "TestAsset"
+ ]
+ },
+ "TestAsset": {
+ "vars": {
+ "foo": "bar"
+ },
+ "hosts": [
+ "test_asset"
+ ]
+ },
+ "_meta": {
+ "hostvars": {
+ "test_asset": {
+ "foo": "bar"
+ }
+ }
+ }
+}'''
+
+
+def test_raise_notimplemented_value_in_host():
+
+ class BaseTestAsset(Asset):
+ foo = NotImplemented
+
+ class TestAsset(BaseTestAsset):
+ pass
+
+ test_asset = TestAsset()
+
+ with pytest.raises(errors.PropertyIsNotImplementedError):
+ ansible_inventory(locals())
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
ordered-set==4.0.2
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/lig/pyventory.git@1630f08e58c95e8a50c0256d13034022e4a75067#egg=Pyventory
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pyventory
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- ordered-set==4.0.2
- six==1.17.0
prefix: /opt/conda/envs/pyventory
| [
"tests/unit/test_inventory.py::test_allow_notimplemented_value",
"tests/unit/test_inventory.py::test_raise_notimplemented_value_in_host"
]
| []
| [
"tests/unit/test_inventory.py::test_allow_mixins_for_inventory_items",
"tests/unit/test_inventory.py::test_allow_host_specific_vars",
"tests/unit/test_inventory.py::test_allow_format_strings_as_values",
"tests/unit/test_inventory.py::test_allow_mapping_of_format_strings_as_values",
"tests/unit/test_inventory.py::test_allow_sequence_of_format_strings_as_values",
"tests/unit/test_inventory.py::test_strings_formatting_do_not_conflict_with_numbers",
"tests/unit/test_inventory.py::test_require_arguments_for_format_strings",
"tests/unit/test_inventory.py::test_inheritance_with_format",
"tests/unit/test_inventory.py::test_deep_multiple_inheritance_propagation",
"tests/unit/test_inventory.py::test_skip_non_asset_locals",
"tests/unit/test_inventory.py::test_multiple_children"
]
| []
| MIT License | 1,444 | [
".gitignore",
"pyventory/errors.py",
"pyventory/asset.py"
]
| [
".gitignore",
"pyventory/errors.py",
"pyventory/asset.py"
]
|
|
byu-oit__awslogin-4 | 1a3ae19323a9c8a507ab0e8589a444bd827917e5 | 2017-07-10 16:44:37 | 1a3ae19323a9c8a507ab0e8589a444bd827917e5 | diff --git a/README.md b/README.md
index 1ee5dff..0be4965 100644
--- a/README.md
+++ b/README.md
@@ -12,14 +12,16 @@ BYU used to use the great [aws-adfs](https://github.com/venth/aws-adfs) CLI tool
* Run `pip3 install byu-awslogin`
## Usage
-awslogin automatically sets up the default profile in your ~/.aws/config and ~/.aws/credentials files. **_If you already have a default profile you want to save in your ~/.aws files make sure to do that before running awslogin._**
+awslogin defaults to the default profile in your ~/.aws/config and ~/.aws/credentials files. **_If you already have a default profile you want to save in your ~/.aws files make sure to do that before running awslogin._**
Once you're logged in, you can execute commands using the AWS CLI or AWS SDK. Try running `aws s3 ls`.
Currently, awslogin tokens are only valid for 1 hour due to the assume_role_with_saml AWS API call has a max timeout of 1 hour.
To use it:
* Run `awslogin` and it will prompt you for the AWS account and role to use.
-* Run `awslogin --account <account name> --role <role name>` to skip the prompting for account and name. You could specify just one of the arcuments as well.
+* Run `awslogin --account <account name> --role <role name>` to skip the prompting for account and name. You could specify just one of the arguments as well.
+* Run `awslogin --profile <profile name>` to specifiy an alternative profile
+* Run `awslogin -- --help` for full help message
## Reporting bugs or requesting features
* Enter an issue on the github repo.
@@ -34,6 +36,5 @@ To use it:
* Add support for profiles
* Authenticate once for 8 hours and rerun `awslogin` to relogin
* Write tests
- * (Nate) index.py
* roles.py
* assume_role.py
diff --git a/VERSION b/VERSION
index 69010fa..6f16bd3 100644
--- a/VERSION
+++ b/VERSION
@@ -1,1 +1,1 @@
-0.9.14
\ No newline at end of file
+0.9.15
\ No newline at end of file
diff --git a/byu_awslogin/index.py b/byu_awslogin/index.py
index 6c7aff5..a9a3f44 100755
--- a/byu_awslogin/index.py
+++ b/byu_awslogin/index.py
@@ -3,16 +3,16 @@
import os
import fire
import getpass
-import subprocess
import configparser
from os.path import expanduser
from .adfs_auth import authenticate
from .assume_role import ask_which_role_to_assume, assume_role
from .roles import action_url_on_validation_success, retrieve_roles_page
-def cli(account=None, role=None):
+
+def cli(account=None, role=None, profile='default'):
# Get the federated credentials from the user
- cached_netid = load_last_netid()
+ cached_netid = load_last_netid(aws_file('config'), profile)
if cached_netid:
net_id_prompt = 'BYU Net ID [{}]: '.format(cached_netid)
else:
@@ -46,7 +46,6 @@ def cli(account=None, role=None):
####
# Ask user which role to assume
####
- #print(principal_roles)
account_name, role_name, chosen_role = ask_which_role_to_assume(account_names, principal_roles, account, role)
####
@@ -54,11 +53,11 @@ def cli(account=None, role=None):
####
aws_session_token = assume_role(*chosen_role, assertion)
- write_to_cred_file(aws_session_token)
- write_to_config_file(net_id, 'us-west-2')
+ check_for_aws_dir()
+ write_to_cred_file(aws_file('creds'), aws_session_token, profile)
+ write_to_config_file(aws_file('config'), net_id, 'us-west-2', profile)
print("Now logged into {}@{}".format(role_name, account_name))
- #proc = subprocess.Popen(args, env=os.environ)
# Overwrite and delete the credential variables, just for safety
username = '##############################################'
@@ -66,52 +65,53 @@ def cli(account=None, role=None):
del username
del password
+
def main():
fire.Fire(cli)
+def aws_file(file_type):
+ if file_type == 'creds':
+ return "{}/.aws/credentials".format(expanduser('~'))
+ else:
+ return "{}/.aws/config".format(expanduser('~'))
+
+
def open_config_file(file):
config = configparser.ConfigParser()
config.read(file)
return config
-def write_to_cred_file(aws_session_token):
- check_for_aws_dir()
- file = "{}/.aws/credentials".format(expanduser('~'))
+def check_for_aws_dir(directory="{}/.aws".format(expanduser('~'))):
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+
+def write_to_cred_file(file, aws_session_token, profile):
config = open_config_file(file)
- config['default'] = {'aws_access_key_id': aws_session_token['Credentials']['AccessKeyId'],
- 'aws_secret_access_key': aws_session_token['Credentials']['SecretAccessKey'],
- 'aws_session_token': aws_session_token['Credentials']['SessionToken']
- }
+ config[profile] = {'aws_access_key_id': aws_session_token['Credentials']['AccessKeyId'],
+ 'aws_secret_access_key': aws_session_token['Credentials']['SecretAccessKey'],
+ 'aws_session_token': aws_session_token['Credentials']['SessionToken']
+ }
with open(file, 'w') as configfile:
config.write(configfile)
-def check_for_aws_dir():
- directory = "{}/.aws".format(expanduser('~'))
- if not os.path.exists(directory):
- os.makedirs(directory)
- file = "{}/config".format(directory)
-
-
-def write_to_config_file(net_id, region):
- check_for_aws_dir()
- file = "{}/.aws/config".format(expanduser('~'))
+def write_to_config_file(file, net_id, region, profile):
config = open_config_file(file)
- config['default'] = {'region': region, 'adfs_netid': net_id}
+ config[profile] = {'region': region, 'adfs_netid': net_id}
with open(file, 'w') as configfile:
config.write(configfile)
-def load_last_netid():
- file = "{}/.aws/config".format(expanduser('~'))
+def load_last_netid(file, profile):
config = open_config_file(file)
- if config.has_section('default') and config.has_option('default', 'adfs_netid'):
- return config['default']['adfs_netid']
+ if config.has_section(profile) and config.has_option(profile, 'adfs_netid'):
+ return config[profile]['adfs_netid']
else:
return ''
if __name__ == '__main__':
- main()
\ No newline at end of file
+ main()
| Support for profiles as cli option
I don't use the default profile at all. This requires me to know which environment I'm operating in, and it prevents accidental or malicious scripts from executing against a default account, whatever that may be.
CLI option could be --profile <profilename> or just a profile name, e.g.
`awslogin --profile dev`
or
`awslogin dev`
| byu-oit/awslogin | diff --git a/test/byu_awslogin/test_index.py b/test/byu_awslogin/test_index.py
index d73a854..df88344 100644
--- a/test/byu_awslogin/test_index.py
+++ b/test/byu_awslogin/test_index.py
@@ -1,4 +1,79 @@
+import pytest
+import os.path
+import configparser
from byu_awslogin import index
+
[email protected]
+def aws_config_file(tmpdir):
+ return tmpdir.dirpath(".aws/config")
+
+
[email protected]
+def aws_cred_file(tmpdir):
+ return tmpdir.dirpath(".aws/credentials")
+
+
[email protected]
+def read_config_file(file):
+ config = configparser.ConfigParser()
+ config.read(file)
+ return config
+
+
[email protected]
+def profile():
+ return 'default'
+
[email protected]
+def fake_config_file(tmpdir):
+ dir = tmpdir.dirpath(".aws/")
+ file = dir + 'config'
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ config = configparser.ConfigParser()
+ config['default'] = {'region': 'fake-region-1', 'adfs_netid': 'fake_netid'}
+ with open(file, 'w') as write_file:
+ config.write(write_file)
+ return file
+
+
+def test_open_config_file(aws_config_file):
+ assert index.open_config_file(aws_config_file)
+
+
+def test_aws_file():
+ assert index.aws_file('config')
+
+
+def test_check_for_aws_dir(tmpdir):
+ dir = tmpdir.dirpath(".aws")
+ index.check_for_aws_dir(dir)
+ assert os.path.exists(dir)
+
+
+def test_write_to_cred_file(aws_cred_file, profile):
+ aws_session_token = {'Credentials': {'AccessKeyId': 'keyid', 'SecretAccessKey': 'secretkey', 'SessionToken': 'sessiontoken'}}
+ index.write_to_cred_file(aws_cred_file, aws_session_token, profile)
+ config = read_config_file(aws_cred_file)
+ assert config['default'] == {'aws_access_key_id': 'keyid',
+ 'aws_secret_access_key': 'secretkey',
+ 'aws_session_token': 'sessiontoken'
+ }
+
+
+def test_write_to_config_file(aws_config_file, profile):
+ net_id = 'fake_netid'
+ region = 'fakeRegion'
+ index.write_to_config_file(aws_config_file, net_id, region, profile)
+ config = read_config_file(aws_config_file)
+ assert config['default'] == {'region': 'fakeRegion', 'adfs_netid': 'fake_netid'}
+
+
+def test_load_last_netid(fake_config_file, profile):
+ assert index.load_last_netid(fake_config_file, profile) == 'fake_netid'
+
+
[email protected](reason="not sure how to test this yet")
def test_cli():
- assert True == True
+ pass
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y pandoc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | awscli==1.11.96
beautifulsoup4==4.6.0
boto3==1.4.4
botocore==1.5.59
-e git+https://github.com/byu-oit/awslogin.git@1a3ae19323a9c8a507ab0e8589a444bd827917e5#egg=byu_awslogin
certifi==2017.4.17
chardet==3.0.3
colorama==0.3.7
docutils==0.13.1
exceptiongroup==1.2.2
fire==0.7.0
idna==2.5
iniconfig==2.1.0
jmespath==0.9.3
lxml==5.3.1
nh3==0.2.21
packaging==24.2
pkginfo==1.12.1.2
pluggy==1.5.0
pyasn1==0.6.1
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.6.0
PyYAML==3.12
readme_renderer==43.0
requests==2.17.3
requests-toolbelt==1.0.0
rsa==3.4.2
s3transfer==0.1.10
six==1.10.0
termcolor==3.0.0
tomli==2.2.1
tqdm==4.67.1
twine==1.15.0
urllib3==1.21.1
| name: awslogin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- awscli==1.11.96
- beautifulsoup4==4.6.0
- boto3==1.4.4
- botocore==1.5.59
- certifi==2017.4.17
- chardet==3.0.3
- colorama==0.3.7
- docutils==0.13.1
- exceptiongroup==1.2.2
- fire==0.7.0
- idna==2.5
- iniconfig==2.1.0
- jmespath==0.9.3
- lxml==5.3.1
- nh3==0.2.21
- packaging==24.2
- pkginfo==1.12.1.2
- pluggy==1.5.0
- pyasn1==0.6.1
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.6.0
- pyyaml==3.12
- readme-renderer==43.0
- requests==2.17.3
- requests-toolbelt==1.0.0
- rsa==3.4.2
- s3transfer==0.1.10
- six==1.10.0
- termcolor==3.0.0
- tomli==2.2.1
- tqdm==4.67.1
- twine==1.15.0
- urllib3==1.21.1
prefix: /opt/conda/envs/awslogin
| [
"test/byu_awslogin/test_index.py::test_aws_file",
"test/byu_awslogin/test_index.py::test_check_for_aws_dir",
"test/byu_awslogin/test_index.py::test_load_last_netid"
]
| [
"test/byu_awslogin/test_index.py::test_write_to_cred_file",
"test/byu_awslogin/test_index.py::test_write_to_config_file"
]
| [
"test/byu_awslogin/test_index.py::test_open_config_file"
]
| []
| Apache License 2.0 | 1,445 | [
"VERSION",
"byu_awslogin/index.py",
"README.md"
]
| [
"VERSION",
"byu_awslogin/index.py",
"README.md"
]
|
|
peterbe__hashin-42 | 150daa03f4ec17544d93aa9e66a06d1adc45bf26 | 2017-07-10 18:41:09 | 150daa03f4ec17544d93aa9e66a06d1adc45bf26 | diff --git a/README.rst b/README.rst
index 965e63c..984d968 100644
--- a/README.rst
+++ b/README.rst
@@ -185,6 +185,11 @@ put it directly into ``pip``.
Version History
===============
+0.11.0
+ * Cope with leading zeros in version numbers when figuring out what
+ the latest version is.
+ See https://github.com/peterbe/hashin/issues/39
+
0.10.0
* Latest version is now figured out by looking at all version numbers
in the list of releases from the JSON payload. The pre releases are
diff --git a/hashin.py b/hashin.py
index c6c778e..6d05a41 100755
--- a/hashin.py
+++ b/hashin.py
@@ -186,10 +186,10 @@ def get_latest_version(data):
for version in data['releases']:
v = parse(version)
if not v.is_prerelease:
- all_versions.append(v)
+ all_versions.append((v, version))
all_versions.sort(reverse=True)
# return the highest non-pre-release version
- return str(all_versions[0])
+ return str(all_versions[0][1])
def expand_python_version(version):
diff --git a/setup.py b/setup.py
index c90df58..d8ed11b 100644
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@ except ImportError:
setup(
name='hashin',
- version='0.10.0',
+ version='0.11.0',
description='Edits your requirements.txt by hashing them in',
long_description=open(path.join(_here, 'README.rst')).read(),
author='Peter Bengtsson',
| hashin cannot handle leading zeros in package version numbers
Pip can install a package even when a leading zero is missing from the requested version number, e.g. `Unidecode==0.4.20` for the published `Unidecode==0.04.20`:
I was passing the output of `pip freeze` into `hashin` and encountered this problem.
```
/tmp > bin/pip install Unidecode==0.4.20 # or Unidecode==0.04.20
Collecting Unidecode==0.4.20
Using cached Unidecode-0.04.20-py2.py3-none-any.whl
Installing collected packages: Unidecode
Successfully installed Unidecode-0.4.20
/tmp > bin/pip freeze
Unidecode==0.4.20
```
But hashin cannot handle this.
```
/tmp > bin/hashin Unidecode==0.4.20
Traceback (most recent call last):
File "bin/hashin", line 11, in <module>
sys.exit(main())
File "/tmp/local/lib/python2.7/site-packages/hashin.py", line 400, in main
verbose=args.verbose,
File "/tmp/local/lib/python2.7/site-packages/hashin.py", line 96, in run
run_single_package(spec, *args, **kwargs)
File "/tmp/local/lib/python2.7/site-packages/hashin.py", line 119, in run_single_package
algorithm=algorithm
File "/tmp/local/lib/python2.7/site-packages/hashin.py", line 363, in get_package_hashes
raise PackageError('No data found for version {0}'.format(version))
hashin.PackageError: No data found for version 0.4.20
```
| peterbe/hashin | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 831697d..a9e2757 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -91,6 +91,20 @@ class Tests(TestCase):
})
self.assertEqual(version, '0.999')
+ @mock.patch('hashin.urlopen')
+ def test_get_latest_version_non_pre_release_leading_zeros(self, murlopen):
+ version = hashin.get_latest_version({
+ 'info': {
+ 'version': '0.3',
+ },
+ 'releases': {
+ '0.04.13': {},
+ '0.04.21': {},
+ '0.04.09': {},
+ }
+ })
+ self.assertEqual(version, '0.04.21')
+
@mock.patch('hashin.urlopen')
def test_get_hashes_error(self, murlopen):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": [],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
-e git+https://github.com/peterbe/hashin.git@150daa03f4ec17544d93aa9e66a06d1adc45bf26#egg=hashin
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: hashin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- mock==5.2.0
- nose==1.3.7
prefix: /opt/conda/envs/hashin
| [
"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release_leading_zeros"
]
| [
"tests/test_cli.py::Tests::test_as_library",
"tests/test_cli.py::Tests::test_run",
"tests/test_cli.py::Tests::test_run_case_insensitive",
"tests/test_cli.py::Tests::test_run_contained_names"
]
| [
"tests/test_cli.py::Tests::test_amend_requirements_content_new",
"tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi",
"tests/test_cli.py::Tests::test_expand_python_version",
"tests/test_cli.py::Tests::test_filter_releases",
"tests/test_cli.py::Tests::test_get_hashes_error",
"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release",
"tests/test_cli.py::Tests::test_get_latest_version_simple",
"tests/test_cli.py::Tests::test_release_url_metadata_python"
]
| []
| MIT License | 1,446 | [
"README.rst",
"hashin.py",
"setup.py"
]
| [
"README.rst",
"hashin.py",
"setup.py"
]
|
|
peterbe__hashin-43 | 469e16c992dd8952871254a64355335a72afe35c | 2017-07-10 18:59:48 | 469e16c992dd8952871254a64355335a72afe35c | diff --git a/README.rst b/README.rst
index 984d968..15310ff 100644
--- a/README.rst
+++ b/README.rst
@@ -185,6 +185,11 @@ put it directly into ``pip``.
Version History
===============
+0.11.1
+ * Ability to run ``hashin --version`` to see what version of hashin is
+ installed.
+ See https://github.com/peterbe/hashin/issues/41
+
0.11.0
* Cope with leading zeros in version numbers when figuring out what
the latest version is.
diff --git a/hashin.py b/hashin.py
index 6d05a41..3c9159c 100755
--- a/hashin.py
+++ b/hashin.py
@@ -64,6 +64,13 @@ parser.add_argument(
action='append',
default=[],
)
+parser.add_argument(
+ '--version',
+ help='Version of hashin',
+ action='store_true',
+ default=False,
+)
+
major_pip_version = int(pip.__version__.split('.')[0])
if major_pip_version < 8:
@@ -408,6 +415,13 @@ def get_package_hashes(
def main():
+ if '--version' in sys.argv[1:]:
+ # Can't be part of argparse because the 'packages' is mandatory
+ # print out the version of self
+ import pkg_resources
+ print(pkg_resources.get_distribution('hashin').version)
+ return 0
+
args = parser.parse_args()
return run(
| hashin --version should say what version hashin is
Title says it all. | peterbe/hashin | diff --git a/tests/test_arg_parse.py b/tests/test_arg_parse.py
index e570635..cfdacdc 100644
--- a/tests/test_arg_parse.py
+++ b/tests/test_arg_parse.py
@@ -19,6 +19,7 @@ def test_everything():
python_version=['3.5'],
requirements_file='reqs.txt',
verbose=True,
+ version=False,
)
eq_(args, (expected, []))
@@ -37,6 +38,7 @@ def test_everything_long():
python_version=['3.5'],
requirements_file='reqs.txt',
verbose=True,
+ version=False,
)
eq_(args, (expected, []))
@@ -49,5 +51,6 @@ def test_minimal():
python_version=[],
requirements_file='requirements.txt',
verbose=False,
+ version=False,
)
eq_(args, (expected, []))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"mock",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
-e git+https://github.com/peterbe/hashin.git@469e16c992dd8952871254a64355335a72afe35c#egg=hashin
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mock==5.2.0
nose==1.3.7
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: hashin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- mock==5.2.0
- nose==1.3.7
prefix: /opt/conda/envs/hashin
| [
"tests/test_arg_parse.py::test_everything",
"tests/test_arg_parse.py::test_everything_long",
"tests/test_arg_parse.py::test_minimal"
]
| []
| []
| []
| MIT License | 1,447 | [
"README.rst",
"hashin.py"
]
| [
"README.rst",
"hashin.py"
]
|
|
google__mobly-246 | 6760e850a2a5ec0d7ebafac2e3b0de3d261ff803 | 2017-07-11 06:52:34 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | dthkao:
*[mobly/base_test.py, line 90 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolBHB_6qgNLwbiX5DP:-KolBHB_6qgNLwbiX5DQ:b-3ck3ax) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/base_test.py#L90)):*
> ```Python
> self.register_controller = configs.register_controller
> self.results = records.TestResult()
> self.summary_writer = configs.summary_writer
> ```
why not provide the option for someone to provide an alternate summary writer (e.g., xml)?
---
*[mobly/logger.py, line 172 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolAdOnosuYsrZtk8QR:-KolAdOnosuYsrZtk8QS:b43g1d3) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/logger.py#L172)):*
> ```Python
> log.addHandler(fh)
> # Write logger output to files
> fh_info = logging.FileHandler(os.path.join(log_path, 'test_log.INFO'))
> ```
filenames to module constants
---
*[mobly/test_runner.py, line 302 at r1](https://reviewable.io:443/reviews/google/mobly/246#-Kol979fZAxltFe1-WH_:-Kol979fZAxltFe1-WHa:b-tmmajb) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/test_runner.py#L302)):*
> ```Python
> log_path = os.path.join(self._log_dir, self._test_bed_name, start_time)
> summary_writer = records.TestSummaryWriter(
> os.path.join(log_path, 'test_summary.yml'))
> ```
official extension is '.yaml'. Also move string to constant?
---
*[mobly/test_runner.py, line 303 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolB5WY-9olsjILQnli:-KolB5WY-9olsjILQnlj:bnq3578) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/test_runner.py#L303)):*
> ```Python
> summary_writer = records.TestSummaryWriter(
> os.path.join(log_path, 'test_summary.yml'))
> print(log_path)
> ```
debugging? why print?
---
xpconanfan:
*[mobly/base_test.py, line 90 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolBHB_6qgNLwbiX5DP:-KomaeyRNY3h__TUDDIn:b-wwysz7) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/base_test.py#L90)):*
> Previously, dthkao (David T.H. Kao) wrote…
> why not provide the option for someone to provide an alternate summary writer (e.g., xml)?
That's out of the scope of this change, which focuses on output file changes.
---
*[mobly/logger.py, line 172 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolAdOnosuYsrZtk8QR:-KomcdAdKTVWxLwdeRa7:b-896fix) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/logger.py#L172)):*
> Previously, dthkao (David T.H. Kao) wrote…
> filenames to module constants
Done.
---
*[mobly/test_runner.py, line 302 at r1](https://reviewable.io:443/reviews/google/mobly/246#-Kol979fZAxltFe1-WH_:-KomccE_WdEKHasrR22p:b-896fix) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/test_runner.py#L302)):*
> Previously, dthkao (David T.H. Kao) wrote…
> official extension is '.yaml'. Also move string to constant?
Done.
---
*[mobly/test_runner.py, line 303 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolB5WY-9olsjILQnli:-Komb7m4w4kWy2Cxuhq5:b-896fix) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/test_runner.py#L303)):*
> Previously, dthkao (David T.H. Kao) wrote…
> debugging? why print?
Done.
---
dthkao:
*[mobly/base_test.py, line 90 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolBHB_6qgNLwbiX5DP:-Kos4vjDDTDnA_P0TY78:b-tatxse) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/base_test.py#L90)):*
> Previously, xpconanfan (Ang Li) wrote…
> That's out of the scope of this change, which focuses on output file changes.
Is that something we should track in a separate issue for followup?
---
xpconanfan:
*[mobly/base_test.py, line 90 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolBHB_6qgNLwbiX5DP:-Kos5x8wgLOKHkRA4289:b-grw326) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/base_test.py#L90)):*
> Previously, dthkao (David T.H. Kao) wrote…
> Is that something we should track in a separate issue for followup?
I do not see a use case atm. Feel free to file a request if you have a use case.
---
dthkao:
*[mobly/base_test.py, line 90 at r1](https://reviewable.io:443/reviews/google/mobly/246#-KolBHB_6qgNLwbiX5DP:-KosCErPuUc2M0VjbX4v:bud97ry) ([raw file](https://github.com/google/mobly/blob/2a5db1715f86f0c48c840f38fc594733fba709d1/mobly/base_test.py#L90)):*
> Previously, xpconanfan (Ang Li) wrote…
> I do not see a use case atm. Feel free to file a request if you have a use case.
#259 filed.
---
k2fong:
*[mobly/base_test.py, line 90 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqJ7C8fQWqsl9MIICt:-KpqJ5xoR-hXMaWo-8Gl:b-igzdwz) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/base_test.py#L90)):*
> self.summary_writer = configs.summary_writer
Since there is a data object test_config that stores the above, would it make sense for BaseTestClass to store a single test_config reference instead of keeping its own duplicate set of config entries (log_path, summary_writer, ...)? Doing so should reduce the need for changes like summary_writer here.
---
*[mobly/logger.py, line 166 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqJRO6K1z0lepTuRlf:-KpqJRO7iDVy9YCrxfyY:b-hedunp) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/logger.py#L166)):*
> ```Python
> filename = get_log_file_timestamp()
> utils.create_dir(log_path)
> # TODO(angli): Deprecate `test_run_details.txt` when we remove old output
> ```
TODO(xpconanfan)? elsewhere as well
---
*[mobly/records.py, line 36 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqNQUQ01G6rAxA3O2J:-KpqNQUQ01G6rAxA3O2K:b-247gvw) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L36)):*
> Enums
'Enum' to 'Constants'? But is there some reason why not make TestSummaryEntryType an enum instead?
---
*[mobly/records.py, line 88 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqOY-Xmtwp8s4NfqVN:-KpqOY-Xmtwp8s4NfqVO:b3326yt) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L88)):*
> ```Python
> """
> new_content = copy.deepcopy(content)
> new_content['Type'] = entry_type
> ```
verify entry_type is a member of TestSummaryEntryType. But I suggest making TestSummaryEntryType an enum so this kind of validation can be omitted.
---
*[mobly/test_runner.py, line 446 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqR-M0YcLjp6_SpPD6:-KpqR-M0YcLjp6_SpPD7:bmi5x2f) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/test_runner.py#L446)):*
> ```Python
> """Writes out a json file with the test result info for easy parsing.
>
> # TODO(angli): Deprecate with old output format.
> ```
Seems like there is some inconsistency with the TODO() comment inside a docstring (with '#' vs without '#')
---
xpconanfan:
*[mobly/logger.py, line 166 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqJRO6K1z0lepTuRlf:-KpqXqiBPjLL4G_vISDz:b-9kd1do) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/logger.py#L166)):*
> Previously, k2fong wrote…
> TODO(xpconanfan)? elsewhere as well
Done.
Tracking with #270
---
*[mobly/records.py, line 36 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqNQUQ01G6rAxA3O2J:-KpqY9PZPtCHRSznjEVb:b-896fix) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L36)):*
> Previously, k2fong wrote…
> > Enums
> 'Enum' to 'Constants'? But is there some reason why not make TestSummaryEntryType an enum instead?
Done.
---
*[mobly/records.py, line 88 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqOY-Xmtwp8s4NfqVN:-Kpqdk5r3N90oVeoNLVt:b-896fix) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L88)):*
> Previously, k2fong wrote…
> verify entry_type is a member of TestSummaryEntryType. But I suggest making TestSummaryEntryType an enum so this kind of validation can be omitted.
Done.
---
*[mobly/test_runner.py, line 446 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqR-M0YcLjp6_SpPD6:-Kpqdw63XaQIRCDhC3U7:b-896fix) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/test_runner.py#L446)):*
> Previously, k2fong wrote…
> Seems like there is some inconsistency with the TODO() comment inside a docstring (with '#' vs without '#')
Done.
---
k2fong:
*[mobly/records.py, line 88 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqOY-Xmtwp8s4NfqVN:-KpucYmWwY5JOEaZa1EW:b-wfwycm) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L88)):*
> Previously, xpconanfan (Ang Li) wrote…
> Done.
I don't believe entry_type of type string will yield what you are looking for with isinstance against TestSummaryEntryType. Can dump() take in the new enum type instead? Actually, looks like you are already using it as an enum, so maybe just need to update the Args comment section above.
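For reference, with the `TestSummaryEntryType` enum defined in the patch, `isinstance` accepts enum members but rejects plain strings (illustrative snippet only):

```python
# Illustrative only: how isinstance() behaves against an Enum class.
import enum

class TestSummaryEntryType(enum.Enum):
    RECORD = 'Record'
    SUMMARY = 'Summary'

print(isinstance(TestSummaryEntryType.RECORD, TestSummaryEntryType))  # True
print(isinstance('Record', TestSummaryEntryType))                     # False
print(TestSummaryEntryType.RECORD.value)                              # 'Record'
```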
---
*[mobly/records.py, line 94 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpudrOlxxutDjla1vtQ:-KpudrOlxxutDjla1vtR:bvim0xj) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/records.py#L94)):*
> if not isinstance(entry_type, TestSummaryEntryType):
> raise Error('%s is not a valid entry type, see records.'
> 'TestSummaryEntryType.' % entry_type)
I would suggest not having to check against the pass in type. Not typical in Python from my experience.
---
xpconanfan:
*[mobly/records.py, line 88 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqOY-Xmtwp8s4NfqVN:-KpuuHNDEs4vuOsKs5rt:b-896fix) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/records.py#L88)):*
> Previously, k2fong wrote…
> I don't believe entry_type of type string will yield what you are looking for with isinstance against TestSummaryEntryType. Can dump() take in the new enum type instead? Actually, looks like you are already using it as an enum, so maybe just need to update the Args comment section above.
Done.
---
*[mobly/records.py, line 94 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpudrOlxxutDjla1vtQ:-KpuuGv2qH_8WtEkdVEV:b-896fix) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/records.py#L94)):*
> Previously, k2fong wrote…
> > if not isinstance(entry_type, TestSummaryEntryType):
> > raise Error('%s is not a valid entry type, see records.'
> > 'TestSummaryEntryType.' % entry_type)
> I would suggest not having to check against the pass in type. Not typical in Python from my experience.
Done.
---
xpconanfan:
*[mobly/base_test.py, line 90 at r2](https://reviewable.io:443/reviews/google/mobly/246#-KpqJ7C8fQWqsl9MIICt:-KpuwkUm9_3K_UZKATEJ:b-8bmi7g) ([raw file](https://github.com/google/mobly/blob/6502b3ca6437dfc940de5dac8c31647cad271685/mobly/base_test.py#L90)):*
> Previously, k2fong wrote…
> > self.summary_writer = configs.summary_writer
> Since there is a data object test_config that stores the above, would it make sense for BaseTestClass to store a single test_config reference instead of keeping its own duplicate set of config entries (log_path, summary_writer, ...)? Doing so should reduce the need for changes like summary_writer here.
We actually intentionally set these attributes to `BaseTestClass` itself so the tests can use them directly via `self.xxx`
---
kdart:
*[mobly/logger.py, line 183 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpuxQZ7pl-EiB1TJPII:-KpuxQZ7pl-EiB1TJPIJ:b-b9j9jd) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/logger.py#L183)):*
> ```Python
> log.addHandler(ch)
> log.addHandler(fh_info)
> log.addHandler(fh_debug)
> ```
Could this splitting be made user configurable? I'm not sure everyone would want to split INFO and DEBUG this way. It makes some events harder to correlate. If they are combined into one stream then it's a little faster, and still easily filtered with reporting tools (a simple grep could be sufficient).
---
*[mobly/records.py, line 94 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpudrOlxxutDjla1vtQ:-Kpuvmv9pS7D_gqvu6kJ:b5vf8xr) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/records.py#L94)):*
> Previously, k2fong wrote…
> > if not isinstance(entry_type, TestSummaryEntryType):
> > raise Error('%s is not a valid entry type, see records.'
> > 'TestSummaryEntryType.' % entry_type)
> I would suggest not having to check against the pass in type. Not typical in Python from my experience.
isinstance checks are actually pretty common. Over 1400 library modules in my system contain it.
---
xpconanfan:
*[mobly/logger.py, line 183 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpuxQZ7pl-EiB1TJPII:-Kpuyq_zjebb3WgmDa6N:b-kzwhfq) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/logger.py#L183)):*
> Previously, kdart (Keith Dart) wrote…
> Could this splitting be made user configurable? I'm not sure everyone would want to split INFO and DEBUG this way. It makes some events harder to correlate. If they are combined into one stream then it's a little faster, and still easily filtered with reporting tools (a simple grep could be sufficient).
The current one dumps everything together.
What we've found is that it's very difficult to read when debugging an app, considering every single adb call is logged.
So the DEBUG one has all the lines, whereas the INFO one would only have the ones displayed in console (console outputs INFO level).
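Roughly, the split just attaches two file handlers at different levels to the same logger; a sketch with the stock `logging` module (file names as in the patch, otherwise illustrative):

```python
# Sketch of the INFO/DEBUG split using the standard logging module.
import logging
import os

def attach_split_handlers(log, log_path, formatter):
    fh_info = logging.FileHandler(os.path.join(log_path, 'test_log.INFO'))
    fh_info.setFormatter(formatter)
    fh_info.setLevel(logging.INFO)    # mirrors what the console shows
    fh_debug = logging.FileHandler(os.path.join(log_path, 'test_log.DEBUG'))
    fh_debug.setFormatter(formatter)
    fh_debug.setLevel(logging.DEBUG)  # full stream, including every adb call
    log.addHandler(fh_info)
    log.addHandler(fh_debug)
```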
---
kdart: After talking to Ang, I see that in this context it's a reasonable short-term fix.
---
*[mobly/logger.py, line 183 at r3](https://reviewable.io:443/reviews/google/mobly/246#-KpuxQZ7pl-EiB1TJPII:-Kpv_5M7wjjKXJRVrQiw:b6dk7we) ([raw file](https://github.com/google/mobly/blob/2a7b849d78d34ba48dd1578579110af0ff33b589/mobly/logger.py#L183)):*
> Previously, xpconanfan (Ang Li) wrote…
> The current one dumps everything together.
> What we've found is that it's very difficult to read when debugging an app, considering every single adb call is logged.
> So the DEBUG one has all the lines, whereas the INFO one would only have the ones displayed in console (console outputs INFO level).
OK, now I see that our debug level log also has stderr and other cruft from Android.
---
xpconanfan: There is no Ang, there is only xpconanfan :P
---
kdart: :lgtm:
---
k2fong: :lgtm:
| diff --git a/mobly/config_parser.py b/mobly/config_parser.py
index 094777a..9d84a3a 100644
--- a/mobly/config_parser.py
+++ b/mobly/config_parser.py
@@ -37,8 +37,8 @@ def _validate_test_config(test_config):
"""
required_key = keys.Config.key_testbed.value
if required_key not in test_config:
- raise MoblyConfigError('Required key %s missing in test config.' %
- required_key)
+ raise MoblyConfigError(
+ 'Required key %s missing in test config.' % required_key)
def _validate_testbed_name(name):
@@ -109,8 +109,8 @@ def load_test_config_file(test_config_path, tb_filters=None):
if len(tbs) != len(tb_filters):
raise MoblyConfigError(
'Expect to find %d test bed configs, found %d. Check if'
- ' you have the correct test bed names.' %
- (len(tb_filters), len(tbs)))
+ ' you have the correct test bed names.' % (len(tb_filters),
+ len(tbs)))
configs[keys.Config.key_testbed.value] = tbs
mobly_params = configs.get(keys.Config.key_mobly_params.value, {})
# Decide log path.
@@ -166,6 +166,8 @@ class TestRunConfig(object):
user_params: dict, all the parameters to be consumed by the test logic.
register_controller: func, used by test classes to register controller
modules.
+ summary_writer: records.TestSummaryWriter, used to write elements to
+ the test result summary file.
"""
def __init__(self):
@@ -174,6 +176,7 @@ class TestRunConfig(object):
self.controller_configs = None
self.user_params = None
self.register_controller = None
+ self.summary_writer = None
def copy(self):
"""Returns a deep copy of the current config.
diff --git a/mobly/logger.py b/mobly/logger.py
index 1bd2eb4..de90d6a 100644
--- a/mobly/logger.py
+++ b/mobly/logger.py
@@ -20,6 +20,7 @@ import os
import re
import sys
+from mobly import records
from mobly import utils
log_line_format = '%(asctime)s.%(msecs).03d %(levelname)s %(message)s'
@@ -162,11 +163,24 @@ def _setup_test_logger(log_path, prefix=None, filename=None):
if filename is None:
filename = get_log_file_timestamp()
utils.create_dir(log_path)
+ # TODO(#270): Deprecate `test_run_details.txt` when we remove old output
+ # format support.
fh = logging.FileHandler(os.path.join(log_path, 'test_run_details.txt'))
fh.setFormatter(f_formatter)
fh.setLevel(logging.DEBUG)
- log.addHandler(ch)
log.addHandler(fh)
+ # Write logger output to files
+ fh_info = logging.FileHandler(
+ os.path.join(log_path, records.OUTPUT_FILE_INFO_LOG))
+ fh_info.setFormatter(f_formatter)
+ fh_info.setLevel(logging.INFO)
+ fh_debug = logging.FileHandler(
+ os.path.join(log_path, records.OUTPUT_FILE_DEBUG_LOG))
+ fh_debug.setFormatter(f_formatter)
+ fh_debug.setLevel(logging.DEBUG)
+ log.addHandler(ch)
+ log.addHandler(fh_info)
+ log.addHandler(fh_debug)
log.log_path = log_path
logging.log_path = log_path
@@ -224,4 +238,3 @@ def normalize_log_line_timestamp(log_line_timestamp):
norm_tp = log_line_timestamp.replace(' ', '_')
norm_tp = norm_tp.replace(':', '-')
return norm_tp
-
diff --git a/mobly/records.py b/mobly/records.py
index 0b67c9d..2bf050e 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -11,19 +11,88 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module is where all the record definitions and record containers live.
+"""This module has classes for test result collection, and test result output.
"""
import itertools
+import copy
+import enum
import json
import logging
import pprint
import sys
import traceback
+import yaml
from mobly import signals
from mobly import utils
+# File names for the default files output by
+OUTPUT_FILE_INFO_LOG = 'test_log.INFO'
+OUTPUT_FILE_DEBUG_LOG = 'test_log.DEBUG'
+OUTPUT_FILE_SUMMARY = 'test_summary.yaml'
+
+
+class TestSummaryEntryType(enum.Enum):
+ """Constants used to identify the type of entries in test summary file.
+
+ Test summary file contains multiple yaml documents. In order to parse this
+ file efficiently, the write adds the type of each entry when it writes the
+ entry to the file.
+
+ The idea is similar to how `TestResult.json_str` categorizes different
+ sections of a `TestResult` object in the serialized format.
+ """
+ RECORD = 'Record'
+ SUMMARY = 'Summary'
+ CONTROLLER_INFO = 'ControllerInfo'
+
+
+class Error(Exception):
+ """Raised for errors in records."""
+
+
+class TestSummaryWriter(object):
+ """Writer for the test result summary file of a test run.
+
+ For each test run, a writer is created to stream test results to the
+ summary file on disk.
+
+ The serialization and writing of the `TestResult` object is intentionally
+ kept out of `TestResult` class and put in this class. Because `TestResult`
+ can be operated on by suites, like `+` operation, and it is difficult to
+ guarantee the consistency between `TestResult` in memory and the files on
+ disk. Also, this separation makes it easier to provide a more generic way
+ for users to consume the test summary, like via a database instead of a
+ file.
+ """
+
+ def __init__(self, path):
+ self._path = path
+
+ def dump(self, content, entry_type):
+ """Dumps a dictionary as a yaml document to the summary file.
+
+ Each call to this method dumps a separate yaml document to the same
+ summary file associated with a test run.
+
+ The content of the dumped dictionary has an extra field `TYPE` that
+ specifies the type of each yaml document, which is the flag for parsers
+ to identify each document.
+
+ Args:
+ content: dictionary, the content to serialize and write.
+ entry_type: a member of enum TestSummaryEntryType.
+
+ Raises:
+ recoreds.Error is raised if an invalid entry type is passed in.
+ """
+ new_content = copy.deepcopy(content)
+ new_content['Type'] = entry_type.value
+ content_str = yaml.dump(new_content, explicit_start=True, indent=4)
+ with open(self._path, 'a') as f:
+ f.write(content_str)
+
class TestResultEnums(object):
"""Enums used for TestResultRecord class.
@@ -188,6 +257,8 @@ class TestResultRecord(object):
def json_str(self):
"""Converts this test record to a string in json format.
+ TODO(#270): Deprecate with old output format.
+
Format of the json string is:
{
'Test Name': <test name>,
@@ -251,7 +322,7 @@ class TestResult(object):
# '+' operator for TestResult is only valid when multiple
# TestResult objs were created in the same test run, which means
# the controller info would be the same across all of them.
- # TODO(angli): have a better way to validate this situation.
+ # TODO(xpconanfan): have a better way to validate this situation.
setattr(sum_result, name, l_value)
return sum_result
@@ -276,9 +347,9 @@ class TestResult(object):
def add_controller_info(self, name, info):
try:
- json.dumps(info)
+ yaml.dump(info)
except TypeError:
- logging.warning('Controller info for %s is not JSON serializable!'
+ logging.warning('Controller info for %s is not YAML serializable!'
' Coercing it to string.' % name)
self.controller_info[name] = str(info)
return
@@ -323,6 +394,8 @@ class TestResult(object):
def json_str(self):
"""Converts this test result to a string in json format.
+ TODO(#270): Deprecate with old output format.
+
Format of the json string is:
{
'Results': [
| New summary output.
The current summary log has limited debug information and is in JSON.
We need to consider what other useful information we can include in the summary log, such as invocation status and device monitoring info.
Also consider using a different format so it's possible to stream this file instead of only writing it out in the end.
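For example, a streamable format can be as simple as appending one YAML document per entry and parsing them back lazily; a minimal sketch (assumes PyYAML, names are illustrative):

```python
# Illustrative sketch: append one YAML document per entry, read them back lazily.
import yaml

def append_entry(path, entry, entry_type):
    entry = dict(entry, Type=entry_type)
    with open(path, 'a') as f:
        # explicit_start=True emits a '---' separator, so each call appends an
        # independent YAML document instead of rewriting the whole file.
        f.write(yaml.dump(entry, explicit_start=True))

def read_entries(path):
    with open(path) as f:
        for doc in yaml.safe_load_all(f):
            yield doc
```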
This will make the summary log more crash-proof and take less memory. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index 7353afd..ece150a 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -87,6 +87,7 @@ class BaseTestClass(object):
self.user_params = configs.user_params
self.register_controller = configs.register_controller
self.results = records.TestResult()
+ self.summary_writer = configs.summary_writer
self.current_test_name = None
self._generated_test_table = collections.OrderedDict()
@@ -383,6 +384,8 @@ class BaseTestClass(object):
elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
self._exec_procedure_func(self._on_skip, tr_record)
self.results.add_record(tr_record)
+ self.summary_writer.dump(tr_record.to_dict(),
+ records.TestSummaryEntryType.RECORD)
def _assert_function_name_in_stack(self, expected_func_name):
"""Asserts that the current stack contains the given function name."""
@@ -504,6 +507,8 @@ class BaseTestClass(object):
test_record = records.TestResultRecord(test_name, self.TAG)
test_record.test_skip(exception)
self.results.add_record(test_record)
+ self.summary_writer.dump(test_record.to_dict(),
+ records.TestSummaryEntryType.RECORD)
def run(self, test_names=None):
"""Runs tests within a test class.
@@ -534,6 +539,8 @@ class BaseTestClass(object):
class_record.test_begin()
class_record.test_error(e)
self.results.add_class_error(class_record)
+ self.summary_writer.dump(class_record.to_dict(),
+ records.TestSummaryEntryType.RECORD)
return self.results
logging.info('==========> %s <==========', self.TAG)
# Devise the actual test methods to run in the test class.
@@ -565,6 +572,8 @@ class BaseTestClass(object):
class_record.test_error(e)
self._exec_procedure_func(self._on_fail, class_record)
self.results.add_class_error(class_record)
+ self.summary_writer.dump(class_record.to_dict(),
+ records.TestSummaryEntryType.RECORD)
self._skip_remaining_tests(e)
self._safe_exec_func(self.teardown_class)
return self.results
diff --git a/mobly/test_runner.py b/mobly/test_runner.py
index 5ae2767..8f9aba4 100644
--- a/mobly/test_runner.py
+++ b/mobly/test_runner.py
@@ -158,7 +158,7 @@ def verify_controller_module(module):
def get_info(objects):
[Optional] Gets info from the controller objects used in a test
- run. The info will be included in test_result_summary.json under
+ run. The info will be included in test_summary.yaml under
the key 'ControllerInfo'. Such information could include unique
ID, version, or anything that could be useful for describing the
test bed and debugging.
@@ -262,9 +262,7 @@ class TestRunner(object):
(self._test_bed_name, config.test_bed_name))
self._test_run_infos.append(
TestRunner._TestRunInfo(
- config=config,
- test_class=test_class,
- tests=tests))
+ config=config, test_class=test_class, tests=tests))
def _run_test_class(self, config, test_class, tests=None):
"""Instantiates and executes a test class.
@@ -300,6 +298,8 @@ class TestRunner(object):
raise Error('No tests to execute.')
start_time = logger.get_log_file_timestamp()
log_path = os.path.join(self._log_dir, self._test_bed_name, start_time)
+ summary_writer = records.TestSummaryWriter(
+ os.path.join(log_path, records.OUTPUT_FILE_SUMMARY))
logger.setup_test_logger(log_path, self._test_bed_name)
try:
for test_run_info in self._test_run_infos:
@@ -308,7 +308,7 @@ class TestRunner(object):
test_config.log_path = log_path
test_config.register_controller = functools.partial(
self._register_controller, test_config)
-
+ test_config.summary_writer = summary_writer
try:
self._run_test_class(test_config, test_run_info.test_class,
test_run_info.tests)
@@ -319,6 +319,11 @@ class TestRunner(object):
finally:
self._unregister_controllers()
finally:
+ # Write controller info and summary to summary file.
+ summary_writer.dump(self.results.controller_info,
+ records.TestSummaryEntryType.CONTROLLER_INFO)
+ summary_writer.dump(self.results.summary_dict(),
+ records.TestSummaryEntryType.SUMMARY)
# Stop and show summary.
msg = '\nSummary for test run %s@%s: %s\n' % (
self._test_bed_name, start_time, self.results.summary_str())
@@ -438,7 +443,7 @@ class TestRunner(object):
def _write_results_json_str(self, log_path):
"""Writes out a json file with the test result info for easy parsing.
- TODO(angli): This should be replaced by standard log record mechanism.
+ TODO(#270): Deprecate with old output format.
"""
path = os.path.join(log_path, 'test_run_summary.json')
with open(path, 'w') as f:
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 35c29f2..d7f152b 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -40,6 +40,7 @@ class SomeError(Exception):
class BaseTestTest(unittest.TestCase):
def setUp(self):
self.mock_test_cls_configs = config_parser.TestRunConfig()
+ self.mock_test_cls_configs.summary_writer = mock.Mock()
self.mock_test_cls_configs.log_path = '/tmp'
self.mock_test_cls_configs.user_params = {"some_param": "hahaha"}
self.mock_test_cls_configs.reporter = mock.MagicMock()
diff --git a/tests/mobly/output_test.py b/tests/mobly/output_test.py
new file mode 100755
index 0000000..4bc8e0a
--- /dev/null
+++ b/tests/mobly/output_test.py
@@ -0,0 +1,94 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+import tempfile
+import unittest
+import yaml
+
+from mobly import config_parser
+from mobly import records
+from mobly import test_runner
+
+from tests.lib import mock_controller
+from tests.lib import integration_test
+
+
+class OutputTest(unittest.TestCase):
+ """This test class has unit tests for the implementation of Mobly's output
+ files.
+ """
+
+ def setUp(self):
+ self.tmp_dir = tempfile.mkdtemp()
+ self.base_mock_test_config = config_parser.TestRunConfig()
+ self.base_mock_test_config.test_bed_name = 'SampleTestBed'
+ self.base_mock_test_config.controller_configs = {}
+ self.base_mock_test_config.user_params = {
+ 'icecream': 42,
+ 'extra_param': 'haha'
+ }
+ self.base_mock_test_config.log_path = self.tmp_dir
+ self.log_dir = self.base_mock_test_config.log_path
+ self.test_bed_name = self.base_mock_test_config.test_bed_name
+
+ def tearDown(self):
+ shutil.rmtree(self.tmp_dir)
+
+ def test_output(self):
+ """Verifies the expected output files from a test run.
+
+ * Files are correctly created.
+ * Basic sanity checks of each output file.
+ """
+ mock_test_config = self.base_mock_test_config.copy()
+ mock_ctrlr_config_name = mock_controller.MOBLY_CONTROLLER_CONFIG_NAME
+ my_config = [{
+ 'serial': 'xxxx',
+ 'magic': 'Magic1'
+ }, {
+ 'serial': 'xxxx',
+ 'magic': 'Magic2'
+ }]
+ mock_test_config.controller_configs[mock_ctrlr_config_name] = my_config
+ tr = test_runner.TestRunner(self.log_dir, self.test_bed_name)
+ tr.add_test_class(mock_test_config, integration_test.IntegrationTest)
+ tr.run()
+ output_dir = os.path.join(self.log_dir, self.test_bed_name, 'latest')
+ summary_file_path = os.path.join(output_dir,
+ records.OUTPUT_FILE_SUMMARY)
+ debug_log_path = os.path.join(output_dir,
+ records.OUTPUT_FILE_DEBUG_LOG)
+ info_log_path = os.path.join(output_dir, records.OUTPUT_FILE_INFO_LOG)
+ self.assertTrue(os.path.isfile(summary_file_path))
+ self.assertTrue(os.path.isfile(debug_log_path))
+ self.assertTrue(os.path.isfile(info_log_path))
+ summary_entries = []
+ with open(summary_file_path) as f:
+ for entry in yaml.load_all(f):
+ self.assertTrue(entry['Type'])
+ summary_entries.append(entry)
+ with open(debug_log_path, 'r') as f:
+ content = f.read()
+ self.assertIn('DEBUG', content)
+ self.assertIn('INFO', content)
+ with open(info_log_path, 'r') as f:
+ content = f.read()
+ self.assertIn('INFO', content)
+ self.assertNotIn('DEBUG', content)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index 51b9e28..a5f68db 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -12,6 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
+import shutil
+import tempfile
+import yaml
+
from future.tests.base import unittest
from mobly import records
@@ -28,6 +33,10 @@ class RecordsTest(unittest.TestCase):
self.details = "Some details about the test execution."
self.float_extra = 12345.56789
self.json_extra = {"ha": "whatever"}
+ self.tmp_path = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.tmp_path)
def verify_record(self, record, result, details, extras):
# Verify each field.
@@ -295,6 +304,19 @@ class RecordsTest(unittest.TestCase):
self.assertTrue(tr.is_test_executed(record1.test_name))
self.assertFalse(tr.is_test_executed(self.tn + 'ha'))
+ def test_summary_write_dump(self):
+ s = signals.TestFailure(self.details, self.float_extra)
+ record1 = records.TestResultRecord(self.tn)
+ record1.test_begin()
+ record1.test_fail(s)
+ dump_path = os.path.join(self.tmp_path, 'ha.yaml')
+ writer = records.TestSummaryWriter(dump_path)
+ writer.dump(record1.to_dict(), records.TestSummaryEntryType.RECORD)
+ with open(dump_path, 'r') as f:
+ content = yaml.load(f)
+ self.assertEqual(content['Type'],
+ records.TestSummaryEntryType.RECORD.value)
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc adb"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@6760e850a2a5ec0d7ebafac2e3b0de3d261ff803#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception"
]
| [
"tests/mobly/output_test.py::OutputTest::test_output",
"tests/mobly/records_test.py::RecordsTest::test_summary_write_dump"
]
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error",
"tests/mobly/records_test.py::RecordsTest::test_is_test_executed",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra"
]
| []
| Apache License 2.0 | 1,448 | [
"mobly/records.py",
"mobly/logger.py",
"mobly/config_parser.py"
]
| [
"mobly/records.py",
"mobly/logger.py",
"mobly/config_parser.py"
]
|
google__mobly-248 | 31dcff279d4808e011f6af8ab0661b9750357cda | 2017-07-11 11:28:03 | 31dcff279d4808e011f6af8ab0661b9750357cda | dthkao: fixes #247 | diff --git a/mobly/records.py b/mobly/records.py
index 6c5efe2..0b67c9d 100644
--- a/mobly/records.py
+++ b/mobly/records.py
@@ -14,6 +14,7 @@
"""This module is where all the record definitions and record containers live.
"""
+import itertools
import json
import logging
import pprint
@@ -283,7 +284,7 @@ class TestResult(object):
return
self.controller_info[name] = info
- def fail_class(self, test_record):
+ def add_class_error(self, test_record):
"""Add a record to indicate a test class has failed before any test
could execute.
@@ -337,7 +338,9 @@ class TestResult(object):
"""
d = {}
d['ControllerInfo'] = self.controller_info
- d['Results'] = [record.to_dict() for record in self.executed]
+ records_to_write = itertools.chain(self.passed, self.failed,
+ self.skipped, self.error)
+ d['Results'] = [record.to_dict() for record in records_to_write]
d['Summary'] = self.summary_dict()
json_str = json.dumps(d, indent=4, sort_keys=True)
return json_str
| Stacktrace is lost in test_summary.json
The re-raise at lines 349-352 of base_test.py, added in the PR linked below, loses the stacktrace:
https://github.com/google/mobly/pull/241
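A minimal standalone sketch (not Mobly code) of why the fix in the patch below replaces `raise e` with a bare `raise`: under Python 2, `raise e` rebuilds the traceback from the re-raise point and drops the frames where the failure actually occurred, while a bare `raise` preserves them (and avoids an extra frame on Python 3).
```python
import traceback


def fail():
    raise ValueError('boom')  # the frame we want to keep in the report


def reraise_bare():
    try:
        fail()
    except ValueError:
        # A bare `raise` re-raises the active exception with its original
        # traceback, so the `fail` frame survives into any formatted output.
        raise


try:
    reraise_bare()
except ValueError:
    print(traceback.format_exc())  # still lists the fail() frame
```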
| google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index 355603e..e85551a 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -343,12 +343,12 @@ class BaseTestClass(object):
test_method(*args, **kwargs)
else:
test_method()
- except signals.TestPass as e:
- raise e
- except Exception as e:
+ except signals.TestPass:
+ raise
+ except Exception:
logging.exception('Exception occurred in %s.',
self.current_test_name)
- raise e
+ raise
finally:
try:
self._teardown_test(test_name)
@@ -531,8 +531,8 @@ class BaseTestClass(object):
class_record = records.TestResultRecord('setup_generated_tests',
self.TAG)
class_record.test_begin()
- class_record.test_fail(e)
- self.results.fail_class(class_record)
+ class_record.test_error(e)
+ self.results.add_class_error(class_record)
return self.results
logging.info('==========> %s <==========', self.TAG)
# Devise the actual test methods to run in the test class.
@@ -551,18 +551,18 @@ class BaseTestClass(object):
except signals.TestAbortClass as e:
# The test class is intentionally aborted.
# Skip all tests peacefully.
- e.details = 'Test class aborted due to: %s' % e.details
+ e.details = 'setup_class aborted due to: %s' % e.details
self._skip_remaining_tests(e)
return self.results
except Exception as e:
# Setup class failed for unknown reasons.
# Fail the class and skip all tests.
- logging.exception('Failed to setup %s.', self.TAG)
+ logging.exception('Error in setup_class %s.', self.TAG)
class_record = records.TestResultRecord('setup_class', self.TAG)
class_record.test_begin()
- class_record.test_fail(e)
+ class_record.test_error(e)
self._exec_procedure_func(self._on_fail, class_record)
- self.results.fail_class(class_record)
+ self.results.add_class_error(class_record)
self._skip_remaining_tests(e)
return self.results
finally:
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 65caf6f..da036ea 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -20,6 +20,10 @@ from mobly import base_test
from mobly import config_parser
from mobly import signals
+from tests.mobly import records_test
+
+validate_test_result = records_test.validate_test_result
+
MSG_EXPECTED_EXCEPTION = "This is an expected exception."
MSG_EXPECTED_TEST_FAILURE = "This is an expected test failure."
MSG_UNEXPECTED_EXCEPTION = "Unexpected exception!"
@@ -187,7 +191,9 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
+ validate_test_result(bt_cls.results)
self.assertEqual(actual_record.test_name, "setup_class")
+
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
self.assertIsNone(actual_record.extras)
expected_summary = ("Error 1, Executed 0, Failed 0, Passed 0, "
@@ -540,6 +546,7 @@ class BaseTestTest(unittest.TestCase):
signal for the entire class, which is different from raising other
exceptions in `setup_class`.
"""
+
class MockBaseTest(base_test.BaseTestClass):
def setup_class(self):
asserts.abort_class(MSG_EXPECTED_EXCEPTION)
@@ -555,6 +562,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run(test_names=["test_1", "test_2", "test_3"])
+ self.assertEqual(len(bt_cls.results.skipped), 3)
self.assertEqual(bt_cls.results.summary_str(),
("Error 0, Executed 0, Failed 0, Passed 0, "
"Requested 3, Skipped 3"))
@@ -966,6 +974,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run()
actual_record = bt_cls.results.error[0]
+ validate_test_result(bt_cls.results)
self.assertEqual(actual_record.test_name, "test_ha")
self.assertEqual(
actual_record.details,
diff --git a/tests/mobly/records_test.py b/tests/mobly/records_test.py
index 9500d1d..f1ee1ed 100755
--- a/tests/mobly/records_test.py
+++ b/tests/mobly/records_test.py
@@ -18,6 +18,26 @@ from mobly import records
from mobly import signals
+def validate_test_result(result):
+ """Validate basic properties of a test result.
+
+ The records in each bucket of the test result should have the corresponding
+ result enum.
+
+ Args:
+ result: The TestResult object to validate.
+ """
+ buckets = [
+ (result.passed, records.TestResultEnums.TEST_RESULT_PASS),
+ (result.failed, records.TestResultEnums.TEST_RESULT_FAIL),
+ (result.error, records.TestResultEnums.TEST_RESULT_ERROR),
+ (result.skipped, records.TestResultEnums.TEST_RESULT_SKIP),
+ ]
+ for bucket_list, expected_enum in buckets:
+ for record in bucket_list:
+ assert record.result == expected_enum
+
+
class RecordsTest(unittest.TestCase):
"""This test class tests the implementation of classes in mobly.records.
"""
@@ -208,7 +228,7 @@ class RecordsTest(unittest.TestCase):
with self.assertRaisesRegexp(TypeError, expected_msg):
tr1 += "haha"
- def test_result_fail_class_with_test_signal(self):
+ def test_result_add_class_error_with_test_signal(self):
record1 = records.TestResultRecord(self.tn)
record1.test_begin()
s = signals.TestPass(self.details, self.float_extra)
@@ -217,13 +237,13 @@ class RecordsTest(unittest.TestCase):
tr.add_record(record1)
s = signals.TestFailure(self.details, self.float_extra)
record2 = records.TestResultRecord("SomeTest", s)
- tr.fail_class(record2)
+ tr.add_class_error(record2)
self.assertEqual(len(tr.passed), 1)
self.assertEqual(len(tr.error), 1)
self.assertEqual(len(tr.executed), 1)
- def test_result_fail_class_with_special_error(self):
- """Call TestResult.fail_class with an error class that requires more
+ def test_result_add_class_error_with_special_error(self):
+ """Call TestResult.add_class_error with an error class that requires more
than one arg to instantiate.
"""
record1 = records.TestResultRecord(self.tn)
@@ -239,7 +259,7 @@ class RecordsTest(unittest.TestCase):
se = SpecialError("haha", 42)
record2 = records.TestResultRecord("SomeTest", se)
- tr.fail_class(record2)
+ tr.add_class_error(record2)
self.assertEqual(len(tr.passed), 1)
self.assertEqual(len(tr.error), 1)
self.assertEqual(len(tr.executed), 1)
@@ -271,17 +291,18 @@ class RecordsTest(unittest.TestCase):
tr = records.TestResult()
tr.add_record(record1)
tr.add_record(record2)
+ validate_test_result(tr)
self.assertFalse(tr.is_all_pass)
- def test_is_all_pass_with_fail_class(self):
- """Verifies that is_all_pass yields correct value when fail_class is
+ def test_is_all_pass_with_add_class_error(self):
+ """Verifies that is_all_pass yields correct value when add_class_error is
used.
"""
record1 = records.TestResultRecord(self.tn)
record1.test_begin()
record1.test_fail(Exception("haha"))
tr = records.TestResult()
- tr.fail_class(record1)
+ tr.add_class_error(record1)
self.assertFalse(tr.is_all_pass)
def test_is_test_executed(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@31dcff279d4808e011f6af8ab0661b9750357cda#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_with_add_class_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_special_error",
"tests/mobly/records_test.py::RecordsTest::test_result_add_class_error_with_test_signal"
]
| []
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass",
"tests/mobly/records_test.py::RecordsTest::test_is_all_pass_negative",
"tests/mobly/records_test.py::RecordsTest::test_is_test_executed",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_success",
"tests/mobly/records_test.py::RecordsTest::test_result_add_operator_type_mismatch",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_stacktrace",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_fail_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_pass_with_json_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_none",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_float_extra",
"tests/mobly/records_test.py::RecordsTest::test_result_record_skip_with_json_extra"
]
| []
| Apache License 2.0 | 1,449 | [
"mobly/records.py"
]
| [
"mobly/records.py"
]
|
asottile__add-trailing-comma-4 | 9ce37f20c644269487c52030912e20a75cc191c1 | 2017-07-11 20:46:37 | 9ce37f20c644269487c52030912e20a75cc191c1 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 9efea83..736fa7d 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -179,13 +179,16 @@ def _fix_call(call, i, tokens):
#
# func_name(arg, arg, arg)
# ^ outer paren
+ brace_start, brace_end = '(', ')'
first_paren = None
paren_stack = []
for i in range(i, len(tokens)):
token = tokens[i]
- if token.src == '(':
+ if token.src == brace_start:
paren_stack.append(i)
- elif token.src == ')':
+ # the ast lies to us about the beginning of parenthesized functions.
+ # See #3. (why we make sure there's something to pop here)
+ elif token.src == brace_end and paren_stack:
paren_stack.pop()
if (token.line, token.utf8_byte_offset) in call.arg_offsets:
@@ -194,7 +197,7 @@ def _fix_call(call, i, tokens):
else:
raise AssertionError('Past end?')
- _fix_inner('(', ')', first_paren, tokens)
+ _fix_inner(brace_start, brace_end, first_paren, tokens)
def _fix_literal(literal, i, tokens):
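A standalone sketch (not the project's code) of the defensive pattern the patch above applies: when the token scan can start past an already-open `(`, a closing paren may arrive while the stack is empty, so the pop has to be guarded instead of failing with `IndexError`.
```python
def unmatched_open_parens(tokens):
    # Track '(' depth defensively: a ')' seen while the stack is empty
    # (e.g. when the scan begins inside an already-parenthesized
    # expression such as `(\n    a\n).f(b)`) is ignored rather than
    # popping from an empty list.
    stack = []
    for tok in tokens:
        if tok == '(':
            stack.append(tok)
        elif tok == ')' and stack:
            stack.pop()
    return len(stack)


print(unmatched_open_parens([')', '.', 'f', '(', 'b', ')']))  # 0, no crash
```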
| "IndexError: pop from empty list" when processing valid file
(Wasn't sure how to describe this scenario better, sorry for the lame title.)
Here's a fairly minimal example:
```python
(
a
).thing(b)
```
Produces an error when processing with add-trailing-comma v0.2.0:
```python
Traceback (most recent call last):
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/bin/add-trailing-comma", line 9, in <module>
load_entry_point('add-trailing-comma==0.2.0', 'console_scripts', 'add-trailing-comma')()
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 269, in main
ret |= fix_file(filename, args)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 249, in fix_file
contents_text = _fix_commas(contents_text, args.py35_plus)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 232, in _fix_commas
_fix_call(call, i, tokens)
File "/nail/home/ckuehl/.pre-commit/repojgy42wl2/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 189, in _fix_call
paren_stack.pop()
IndexError: pop from empty list
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index ddc0bc3..5d1c798 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -42,6 +42,10 @@ xfailif_lt_py35 = pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+')
'x((\n'
' 1,\n'
'))',
+ # regression test for #3
+ '(\n'
+ ' a\n'
+ ').f(b)',
),
)
def test_fix_calls_noops(src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@9ce37f20c644269487c52030912e20a75cc191c1#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n"
]
| []
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,450 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
choderalab__openmmtools-253 | 9e299792b6ae45acb1b5cd6a4033a4a15df1dd75 | 2017-07-12 18:16:11 | 9e299792b6ae45acb1b5cd6a4033a4a15df1dd75 | diff --git a/docs/forcefactories.rst b/docs/forcefactories.rst
new file mode 100644
index 0000000..cd24f18
--- /dev/null
+++ b/docs/forcefactories.rst
@@ -0,0 +1,18 @@
+.. _forcefactories::
+
+Cache
+=====
+
+The module :mod:`openmmtools.forcefactories` implements utility methods and factories to configure system forces.
+
+ - :func:`replace_reaction_field`: Configure a system to model the electrostatics with an :class:`UnshiftedReactionField` force.
+
+Cache objects
+-------------
+
+.. currentmodule:: openmmtools.forcefactories
+.. autosummary::
+ :nosignatures:
+ :toctree: api/generated/
+
+ replace_reaction_field
diff --git a/docs/forces.rst b/docs/forces.rst
new file mode 100644
index 0000000..49d9bcb
--- /dev/null
+++ b/docs/forces.rst
@@ -0,0 +1,22 @@
+.. _forces::
+
+Forces
+======
+
+The module :mod:`openmmtools.forces` implements custom forces that are not natively found in OpenMM.
+
+ - :class:`UnshiftedReactionFieldForce`: A `CustomNonbondedForce <http://docs.openmm.org/7.1.0/api-python/generated/simtk.openmm.openmm.CustomNonbondedForce.html>`_ implementing a reaction field variant with `c_rf` term set to zero and a switching function. Using the native OpenMM reaction field implementation with `c_rf != 0` can cause issues with hydration free energy calculations.
+ - :func:`find_nonbonded_force`: Find the first ``NonbondedForce`` in an OpenMM ``System``.
+ - :func:`iterate_nonbonded_forces`: Iterate over all the ``NonbondedForce``s in an OpenMM ``System``.
+
+Cache objects
+-------------
+
+.. currentmodule:: openmmtools.forces
+.. autosummary::
+ :nosignatures:
+ :toctree: api/generated/
+
+ UnshiftedReactionFieldForce
+ find_nonbonded_force
+ iterate_nonbonded_forces
diff --git a/docs/index.rst b/docs/index.rst
index 108ac4c..c561815 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -53,6 +53,8 @@ Modules
mcmc
sampling
alchemy
+ forces
+ forcefactories
utils
scripts
diff --git a/openmmtools/alchemy.py b/openmmtools/alchemy.py
index 972ef8e..81e7650 100644
--- a/openmmtools/alchemy.py
+++ b/openmmtools/alchemy.py
@@ -36,14 +36,13 @@ usable for the calculation of free energy differences of hydration or ligand bin
# =============================================================================
import copy
-import inspect
import logging
import collections
import numpy as np
from simtk import openmm, unit
-from openmmtools import states, utils
+from openmmtools import states, forcefactories, utils
from openmmtools.constants import ONE_4PI_EPS0
logger = logging.getLogger(__name__)
@@ -843,9 +842,10 @@ class AbsoluteAlchemicalFactory(object):
# If the System uses a NonbondedForce, replace its NonbondedForce implementation of reaction field
# with a Custom*Force implementation that uses c_rf = 0.
- # NOTE: This adds an additional CustomNonbondedForce and CustomBondForce
- if (self.alchemical_rf_treatment == 'switched'):
- alchemical_system = self.replace_reaction_field(alchemical_system)
+ # NOTE: This adds an additional CustomNonbondedForce
+ if self.alchemical_rf_treatment == 'switched':
+ forcefactories.replace_reaction_field(alchemical_system, return_copy=False,
+ switch_width=self.switch_width)
return alchemical_system
@@ -905,73 +905,6 @@ class AbsoluteAlchemicalFactory(object):
return energy_components
- def replace_reaction_field(self, reference_system):
- """Replace reaction-field electrostatics with Custom*Force terms to ensure c_rf = 0.
-
- .. warning:: Unstable API.
- This method is still experimental. It could be moved to some
- other module or have its signature changed in the near future.
-
- A deep copy of the system is made.
-
- If reaction field electrostatics is in use, this will add a CustomNonbondedForce and CustomBondForce to the System
- for each NonbondedForce that utilizes CutoffPeriodic.
-
- Note that the resulting System object can NOT be fed to `create_alchemical_system` since the CustomNonbondedForce
- will not be recognized and re-coded.
-
- Parameters
- ----------
- reference_system : simtk.openmm.System
- The system to use as a reference for the creation of the
- alchemical system. This will not be modified.
-
- Returns
- -------
- system : simtk.openmm.System
- System with reaction-field converted to c_rf = 0
-
- """
- system = copy.deepcopy(reference_system)
- for force_index, reference_force in enumerate(system.getForces()):
- reference_force_name = reference_force.__class__.__name__
- if (reference_force_name == 'NonbondedForce' and
- reference_force.getNonbondedMethod() == openmm.NonbondedForce.CutoffPeriodic):
- # Create CustomNonbondedForce to handle switched reaction field
- epsilon_solvent = reference_force.getReactionFieldDielectric()
- r_cutoff = reference_force.getCutoffDistance()
- energy_expression = "ONE_4PI_EPS0*chargeprod*(r^(-1) + k_rf*r^2);" # Omit c_rf constant term.
- k_rf = r_cutoff**(-3) * ((epsilon_solvent - 1.0) / (2.0*epsilon_solvent + 1.0))
- energy_expression += "chargeprod = charge1*charge2;"
- energy_expression += "k_rf = %f;" % (k_rf.value_in_unit_system(unit.md_unit_system))
- energy_expression += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0 # already in OpenMM units
- custom_nonbonded_force = openmm.CustomNonbondedForce(energy_expression)
- custom_nonbonded_force.addPerParticleParameter("charge")
- custom_nonbonded_force.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
- custom_nonbonded_force.setCutoffDistance(reference_force.getCutoffDistance())
- custom_nonbonded_force.setUseLongRangeCorrection(False)
- system.addForce(custom_nonbonded_force)
-
- # Add switch
- if self.switch_width is not None:
- custom_nonbonded_force.setUseSwitchingFunction(True)
- custom_nonbonded_force.setSwitchingDistance(reference_force.getCutoffDistance() - self.switch_width)
- else:
- custom_nonbonded_force.setUseSwitchingFunction(False)
-
- # Rewrite particle charges
- for particle_index in range(reference_force.getNumParticles()):
- [charge, sigma, epsilon] = reference_force.getParticleParameters(particle_index)
- reference_force.setParticleParameters(particle_index, abs(0.0*charge), sigma, epsilon)
- custom_nonbonded_force.addParticle([charge])
-
- # Add exclusions to CustomNonbondedForce.
- for exception_index in range(reference_force.getNumExceptions()):
- iatom, jatom, chargeprod, sigma, epsilon = reference_force.getExceptionParameters(exception_index)
- custom_nonbonded_force.addExclusion(iatom, jatom)
-
- return system
-
# -------------------------------------------------------------------------
# Internal usage: AlchemicalRegion
# -------------------------------------------------------------------------
@@ -2091,20 +2024,20 @@ class AbsoluteAlchemicalFactory(object):
add_label(label.format('non-'), force_index2)
# If they are both empty they are identical and any label works.
- elif force1.getNumBonds() == 0:
+ elif force1.getNumBonds() == 0 and force2.getNumBonds() == 0:
add_label(label.format(''), force_index1)
add_label(label.format('non-'), force_index2)
# We check that the bond atoms are both alchemical or not.
else:
- atom_i, atom_j, _ = force1.getBondParameters(0)
+ atom_i, atom_j, _ = force2.getBondParameters(0)
both_alchemical = atom_i in alchemical_atoms and atom_j in alchemical_atoms
if both_alchemical:
- add_label(label.format(''), force_index1)
- add_label(label.format('non-'), force_index2)
- else:
- add_label(label.format('non-'), force_index1)
add_label(label.format(''), force_index2)
+ add_label(label.format('non-'), force_index1)
+ else:
+ add_label(label.format('non-'), force_index2)
+ add_label(label.format(''), force_index1)
return force_labels
diff --git a/openmmtools/cache.py b/openmmtools/cache.py
index 11ac9b5..87ec8d6 100644
--- a/openmmtools/cache.py
+++ b/openmmtools/cache.py
@@ -347,8 +347,7 @@ class ContextCache(object):
If the integrator is not provided, this will search the cache for
any Context in the given ThermodynamicState, regardless of its
- integrator. In this case, the method guarantees that two consecutive
- calls with the same thermodynamic state will retrieve the same context.
+ integrator.
This creates a new Context if no compatible one has been cached.
If a compatible Context exists, the ThermodynamicState is applied
@@ -388,10 +387,8 @@ class ContextCache(object):
# Only one match.
context = self._lru[matching_context_ids[0]]
else:
- # Multiple matches, prefer the non-default Integrator.
- # Always pick the least recently used to make two consective
- # calls retrieving the same integrator.
- for context_id in reversed(matching_context_ids):
+ # Multiple matches, prefer non-default Integrator.
+ for context_id in matching_context_ids:
if context_id[1] != self._default_integrator_id():
context = self._lru[context_id]
break
diff --git a/openmmtools/forcefactories.py b/openmmtools/forcefactories.py
new file mode 100644
index 0000000..777d110
--- /dev/null
+++ b/openmmtools/forcefactories.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+# =============================================================================
+# MODULE DOCSTRING
+# =============================================================================
+
+"""
+Factories to manipulate OpenMM System forces.
+
+"""
+
+
+# =============================================================================
+# GLOBAL IMPORTS
+# =============================================================================
+
+import copy
+
+from simtk import openmm, unit
+
+from openmmtools import forces
+
+
+# =============================================================================
+# FACTORY FUNCTIONS
+# =============================================================================
+
+def replace_reaction_field(reference_system, switch_width=1.0*unit.angstrom,
+ return_copy=True):
+ """Return a system converted to use a switched reaction-field electrostatics.
+
+ This will add an `UnshiftedReactionFieldForce` for each `NonbondedForce`
+ that utilizes `CutoffPeriodic`.
+
+ Note that `AbsoluteAlchemicalFactory.create_alchemical_system()` can NOT
+ handle the resulting `System` object yet since the `CustomNonbondedForce`
+ are not recognized and re-coded.
+
+ Parameters
+ ----------
+ reference_system : simtk.openmm.System
+ The system to use as a reference. This will be modified only if
+ `return_copy` is `False`.
+ switch_width : simtk.unit.Quantity, default 1.0*angstrom
+ Switch width for electrostatics (units of distance).
+ return_copy : bool
+ If `True`, `reference_system` is not modified, and a copy is returned.
+ Setting it to `False` speeds up the function execution but modifies
+ the `reference_system` object.
+
+ Returns
+ -------
+ system : simtk.openmm.System
+ System with reaction-field converted to c_rf = 0
+
+ """
+ if return_copy:
+ system = copy.deepcopy(reference_system)
+ else:
+ system = reference_system
+
+ # Add an UnshiftedReactionFieldForce for each CutoffPeriodic NonbondedForce.
+ for reference_force in forces.iterate_nonbonded_forces(system):
+ if reference_force.getNonbondedMethod() == openmm.NonbondedForce.CutoffPeriodic:
+ reaction_field_force = forces.UnshiftedReactionFieldForce.from_nonbonded_force(reference_force,
+ switch_width=switch_width)
+ system.addForce(reaction_field_force)
+
+ # Remove particle electrostatics from reference force, but leave exceptions.
+ for particle_index in range(reference_force.getNumParticles()):
+ charge, sigma, epsilon = reference_force.getParticleParameters(particle_index)
+ reference_force.setParticleParameters(particle_index, abs(0.0*charge), sigma, epsilon)
+
+ return system
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/openmmtools/forces.py b/openmmtools/forces.py
new file mode 100644
index 0000000..503301f
--- /dev/null
+++ b/openmmtools/forces.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+
+# =============================================================================
+# MODULE DOCSTRING
+# =============================================================================
+
+"""
+Custom OpenMM Forces classes and utilities.
+
+"""
+
+
+# =============================================================================
+# GLOBAL IMPORTS
+# =============================================================================
+
+from simtk import openmm, unit
+
+from openmmtools.constants import ONE_4PI_EPS0
+
+
+# =============================================================================
+# UTILITY FUNCTIONS
+# =============================================================================
+
+def find_nonbonded_force(system):
+ """Find the first OpenMM `NonbondedForce` in the system.
+
+ Parameters
+ ----------
+ system : simtk.openmm.System
+ The system to search.
+
+ Returns
+ -------
+ nonbonded_force : simtk.openmm.NonbondedForce
+ The first `NonbondedForce` object in `system`.
+
+ Raises
+ ------
+ ValueError
+ If the system contains multiple `NonbondedForce`s
+
+ """
+ nonbonded_force = None
+ for force in system.getForces():
+ if isinstance(force, openmm.NonbondedForce):
+ if nonbonded_force is not None:
+ raise ValueError('The System has multiple NonbondedForces')
+ nonbonded_force = force
+ return nonbonded_force
+
+
+def iterate_nonbonded_forces(system):
+ """Iterate over all OpenMM `NonbondedForce`s in `system`.
+
+ Parameters
+ ----------
+ system : simtk.openmm.System
+ The system to search.
+
+ Yields
+ ------
+ force : simtk.openmm.NonbondedForce
+ A `NonbondedForce` object in `system`.
+
+ """
+ for force in system.getForces():
+ if isinstance(force, openmm.NonbondedForce):
+ yield force
+
+
+# =============================================================================
+# REACTION FIELD
+# =============================================================================
+
+class UnshiftedReactionFieldForce(openmm.CustomNonbondedForce):
+ """A force modelling switched reaction-field electrostatics.
+
+ Contrarily to a normal `NonbondedForce` with `CutoffPeriodic` nonbonded
+ method, this force sets the `c_rf` to 0.0 and uses a switching function
+ to avoid forces discontinuities at the cutoff distance.
+
+ Parameters
+ ----------
+ cutoff_distance : simtk.unit.Quantity, default 15*angstroms
+ The cutoff distance (units of distance).
+ switch_width : simtk.unit.Quantity, default 1.0*angstrom
+ Switch width for electrostatics (units of distance).
+ reaction_field_dielectric : float
+ The dielectric constant used for the solvent.
+
+ """
+
+ def __init__(self, cutoff_distance=15*unit.angstroms, switch_width=1.0*unit.angstrom,
+ reaction_field_dielectric=78.3):
+ k_rf = cutoff_distance**(-3) * (reaction_field_dielectric - 1.0) / (2.0*reaction_field_dielectric + 1.0)
+
+ # Energy expression omits c_rf constant term.
+ energy_expression = "ONE_4PI_EPS0*chargeprod*(r^(-1) + k_rf*r^2);"
+ energy_expression += "chargeprod = charge1*charge2;"
+ energy_expression += "k_rf = {:f};".format(k_rf.value_in_unit_system(unit.md_unit_system))
+ energy_expression += "ONE_4PI_EPS0 = {:f};".format(ONE_4PI_EPS0) # already in OpenMM units
+
+ # Create CustomNonbondedForce.
+ super(UnshiftedReactionFieldForce, self).__init__(energy_expression)
+
+ # Add parameters.
+ self.addPerParticleParameter("charge")
+
+ # Configure force.
+ self.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
+ self.setCutoffDistance(cutoff_distance)
+ self.setUseLongRangeCorrection(False)
+ if switch_width is not None:
+ self.setUseSwitchingFunction(True)
+ self.setSwitchingDistance(cutoff_distance - switch_width)
+ else: # Truncated
+ self.setUseSwitchingFunction(False)
+
+ @classmethod
+ def from_nonbonded_force(cls, nonbonded_force, switch_width=1.0*unit.angstrom):
+ """Copy constructor from an OpenMM `NonbondedForce`.
+
+ The returned force has same cutoff distance and dielectric, and
+ its particles have the same charges. Exclusions corresponding to
+ `nonbonded_force` exceptions are also added.
+
+ .. warning
+ This only creates the force object. The electrostatics in
+ `nonbonded_force` remains unmodified. Use the function
+ `replace_reaction_field` to correctly convert a system to
+ use an unshifted reaction field potential.
+
+ Parameters
+ ----------
+ nonbonded_force : simtk.openmm.NonbondedForce
+ The nonbonded force to copy.
+ switch_width : simtk.unit.Quantity
+ Switch width for electrostatics (units of distance).
+
+ Returns
+ -------
+ reaction_field_force : UnshiftedReactionFieldForce
+ The reaction field force with copied particles.
+
+ """
+ # OpenMM gives unitless values.
+ cutoff_distance = nonbonded_force.getCutoffDistance()
+ reaction_field_dielectric = nonbonded_force.getReactionFieldDielectric()
+ reaction_field_force = cls(cutoff_distance, switch_width, reaction_field_dielectric)
+
+ # Set particle charges.
+ for particle_index in range(nonbonded_force.getNumParticles()):
+ charge, sigma, epsilon = nonbonded_force.getParticleParameters(particle_index)
+ reaction_field_force.addParticle([charge])
+
+ # Add exclusions to CustomNonbondedForce.
+ for exception_index in range(nonbonded_force.getNumExceptions()):
+ iatom, jatom, chargeprod, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
+ reaction_field_force.addExclusion(iatom, jatom)
+
+ return reaction_field_force
+
+ @classmethod
+ def from_system(cls, system, switch_width=1.0*unit.angstrom):
+ """Copy constructor from the first OpenMM `NonbondedForce` in `system`.
+
+ If multiple `NonbondedForce`s are found, an exception is raised.
+
+ .. warning
+ This only creates the force object. The electrostatics in
+ `nonbonded_force` remains unmodified. Use the function
+ `replace_reaction_field` to correctly convert a system to
+ use an unshifted reaction field potential.
+
+ Parameters
+ ----------
+ system : simtk.openmm.System
+ The system containing the nonbonded force to copy.
+ switch_width : simtk.unit.Quantity
+ Switch width for electrostatics (units of distance).
+
+ Returns
+ -------
+ reaction_field_force : UnshiftedReactionFieldForce
+ The reaction field force.
+
+ Raises
+ ------
+ ValueError
+ If multiple `NonbondedForce`s are found in the system.
+
+ See Also
+ --------
+ UnshiftedReactionField.from_nonbonded_force
+
+ """
+ nonbonded_force = find_nonbonded_force(system)
+ return cls.from_nonbonded_force(nonbonded_force, switch_width)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
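A brief usage sketch of the module-level `replace_reaction_field` introduced in `openmmtools.forcefactories` by the patch above, so that both the absolute and a future relative factory can share it; the choice of test system and its constructor options below are illustrative only.
```python
from simtk import unit
from simtk.openmm import app
from openmmtools import forcefactories, testsystems

# Illustrative reference system using reaction-field (CutoffPeriodic) electrostatics.
waterbox = testsystems.WaterBox(nonbondedMethod=app.CutoffPeriodic)

# Module-level factory call, replacing the old AbsoluteAlchemicalFactory method.
rf_system = forcefactories.replace_reaction_field(
    waterbox.system,
    switch_width=1.0 * unit.angstrom,
    return_copy=True,  # leave waterbox.system unmodified
)
```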
| Where to put replace_reaction_field?
Currently it's an instance method of `AbsoluteAlchemicalFactory`. The relative factory will surely want to use it too, so we should probably expose it at least as a module-level function in `alchemy`. Are there any other plans for that feature? | choderalab/openmmtools | diff --git a/openmmtools/tests/test_alchemy.py b/openmmtools/tests/test_alchemy.py
index aac29c1..c6581c1 100644
--- a/openmmtools/tests/test_alchemy.py
+++ b/openmmtools/tests/test_alchemy.py
@@ -27,7 +27,7 @@ import nose
import scipy
from nose.plugins.attrib import attr
-from openmmtools import testsystems
+from openmmtools import testsystems, forces
from openmmtools.constants import kB
from openmmtools.alchemy import *
@@ -41,9 +41,7 @@ logger = logging.getLogger(__name__)
temperature = 300.0 * unit.kelvin # reference temperature
# MAX_DELTA = 0.01 * kB * temperature # maximum allowable deviation
MAX_DELTA = 1.0 * kB * temperature # maximum allowable deviation
-MAX_FORCE_RELATIVE_ERROR = 1.0e-6 # maximum allowable relative force error
GLOBAL_ENERGY_UNIT = unit.kilojoules_per_mole # controls printed units
-GLOBAL_FORCE_UNIT = unit.kilojoules_per_mole / unit.nanometers # controls printed units
GLOBAL_ALCHEMY_PLATFORM = None # This is used in every energy calculation.
# GLOBAL_ALCHEMY_PLATFORM = openmm.Platform.getPlatformByName('OpenCL') # DEBUG: Use OpenCL over CPU platform for testing since OpenCL is deterministic, while CPU is not
@@ -88,52 +86,6 @@ def compute_energy(system, positions, platform=None, force_group=-1):
return potential
-def compute_forces(system, positions, platform=None, force_group=-1):
- """Compute forces of the system in the given positions.
-
- Parameters
- ----------
- platform : simtk.openmm.Platform or None, optional
- If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
- force_group : int flag or set of int, optional
- Passed to the groups argument of Context.getState().
-
- """
- timestep = 1.0 * unit.femtoseconds
- integrator = openmm.VerletIntegrator(timestep)
- context = create_context(system, integrator, platform)
- context.setPositions(positions)
- state = context.getState(getForces=True, groups=force_group)
- forces = state.getForces(asNumpy=True)
- del context, integrator, state
- return forces
-
-
-def generate_new_positions(system, positions, platform=None, nsteps=50):
- """Generate new positions by taking a few steps from the old positions.
- Parameters
- ----------
- platform : simtk.openmm.Platform or None, optional
- If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
- nsteps : int, optional, default=50
- Number of steps of dynamics to take.
- Returns
- -------
- new_positions : simtk.unit.Quantity of shape [nparticles,3] with units compatible with distance
- New positions
- """
- temperature = 300 * unit.kelvin
- collision_rate = 90 / unit.picoseconds
- timestep = 1.0 * unit.femtoseconds
- integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
- context = create_context(system, integrator, platform)
- context.setPositions(positions)
- integrator.step(nsteps)
- new_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
- del context, integrator
- return new_positions
-
-
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):
"""Minimize the energy of the given system.
@@ -162,7 +114,7 @@ def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_p
return minimized_positions
-def compute_energy_force(system, positions, force_name):
+def compute_force_energy(system, positions, force_name):
"""Compute the energy of the force with the given name."""
system = copy.deepcopy(system) # Copy to avoid modifications
force_name_index = 1
@@ -192,11 +144,11 @@ def assert_almost_equal(energy1, energy2, err_msg):
def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms):
- """Dissect the contributions to NonbondedForce of the reference system by atom group
- and sterics/electrostatics.
+ """Dissect the nonbonded energy contributions of the reference system
+ by atom group and sterics/electrostatics.
- Note that this can only work on reference_system objects whose CutoffPeriodic forces
- have not been replaced by Custom*Force objects to set c_rf = 0.
+ This also works for system objects whose CutoffPeriodic force
+ has been replaced by a CustomNonbondedForce to set c_rf = 0.
Parameters
----------
@@ -229,60 +181,76 @@ def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms):
"""
- def turn_off(force, sterics=False, electrostatics=False,
+ def turn_off(system, sterics=False, electrostatics=False,
exceptions=False, only_atoms=frozenset()):
+ """Turn off sterics and/or electrostatics interactions.
+
+ If `exceptions` is True, only the exceptions are turned off.
+ Also supports systems that have gone through replace_reaction_field.
+ The `system` must have only nonbonded forces.
+ If `only_atoms` is specified, only those atoms will be turned off.
+
+ """
if len(only_atoms) == 0: # if empty, turn off all particles
- only_atoms = set(range(force.getNumParticles()))
- e_coeff = 0.0 if sterics else 1.0
- c_coeff = 0.0 if electrostatics else 1.0
+ only_atoms = set(range(system.getNumParticles()))
+ epsilon_coeff = 0.0 if sterics else 1.0
+ charge_coeff = 0.0 if electrostatics else 1.0
+
+ # Only a Nonbonded and a CustomNonbonded (for RF) force should be here.
if exceptions: # Turn off exceptions
- for exception_index in range(force.getNumExceptions()):
- [iatom, jatom, charge, sigma, epsilon] = force.getExceptionParameters(exception_index)
+ nonbonded_force = system.getForces()[0] # NonbondedForce
+ for exception_index in range(nonbonded_force.getNumExceptions()):
+ iatom, jatom, charge, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
- force.setExceptionParameters(exception_index, iatom, jatom, c_coeff*charge,
- sigma, e_coeff*epsilon)
+ nonbonded_force.setExceptionParameters(exception_index, iatom, jatom,
+ charge_coeff*charge, sigma, epsilon_coeff*epsilon)
else: # Turn off particle interactions
- for particle_index in range(force.getNumParticles()):
- if particle_index in only_atoms:
- [charge, sigma, epsilon] = force.getParticleParameters(particle_index)
- force.setParticleParameters(particle_index, c_coeff*charge, sigma, e_coeff*epsilon)
-
- def restore_system(reference_system):
- system = copy.deepcopy(reference_system)
- nonbonded_force = system.getForces()[0]
- return system, nonbonded_force
+ for force in system.getForces():
+ for particle_index in range(force.getNumParticles()):
+ if particle_index in only_atoms:
+ # Convert tuple parameters to list to allow changes.
+ parameters = list(force.getParticleParameters(particle_index))
+ parameters[0] *= charge_coeff # charge
+ try: # CustomNonbondedForce
+ force.setParticleParameters(particle_index, parameters)
+ except TypeError: # NonbondedForce
+ parameters[2] *= epsilon_coeff # epsilon
+ force.setParticleParameters(particle_index, *parameters)
nonalchemical_atoms = set(range(reference_system.getNumParticles())).difference(alchemical_atoms)
- # Remove all forces but NonbondedForce and, if CutoffPeriodic is in use,
- # the CustomNonbondedForce and CustomBondForce used to replace it
+ # Remove all forces but NonbondedForce and, if present, the
+ # CustomNonbondedForce used to model the reaction field.
reference_system = copy.deepcopy(reference_system) # don't modify original system
forces_to_remove = list()
for force_index, force in enumerate(reference_system.getForces()):
- if force.__class__.__name__ != 'NonbondedForce':
- forces_to_remove.append(force_index)
- else:
- force.setForceGroup(0)
+ force.setForceGroup(0)
+ if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(30) # separate PME reciprocal from direct space
+ # We keep only CustomNonbondedForces that are not alchemically modified.
+ elif not (isinstance(force, openmm.CustomNonbondedForce) and
+ 'lambda' not in force.getEnergyFunction()):
+ forces_to_remove.append(force_index)
+
for force_index in reversed(forces_to_remove):
reference_system.removeForce(force_index)
- assert len(reference_system.getForces()) == 1
+ assert len(reference_system.getForces()) <= 2
# Compute particle interactions between different groups of atoms
# ----------------------------------------------------------------
- system, nonbonded_force = restore_system(reference_system)
+ system = copy.deepcopy(reference_system)
# Compute total energy from nonbonded interactions
tot_energy = compute_energy(system, positions)
tot_reciprocal_energy = compute_energy(system, positions, force_group={30})
# Compute contributions from particle sterics
- turn_off(nonbonded_force, sterics=True, only_atoms=alchemical_atoms)
+ turn_off(system, sterics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_sterics = compute_energy(system, positions)
- system, nonbonded_force = restore_system(reference_system) # Restore alchemical sterics
- turn_off(nonbonded_force, sterics=True, only_atoms=nonalchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore alchemical sterics
+ turn_off(system, sterics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_sterics = compute_energy(system, positions)
- turn_off(nonbonded_force, sterics=True)
+ turn_off(system, sterics=True)
tot_energy_no_particle_sterics = compute_energy(system, positions)
tot_particle_sterics = tot_energy - tot_energy_no_particle_sterics
@@ -291,15 +259,15 @@ def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms):
na_particle_sterics = tot_particle_sterics - nn_particle_sterics - aa_particle_sterics
# Compute contributions from particle electrostatics
- system, nonbonded_force = restore_system(reference_system) # Restore sterics
- turn_off(nonbonded_force, electrostatics=True, only_atoms=alchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore sterics
+ turn_off(system, electrostatics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_electro = compute_energy(system, positions)
nn_reciprocal_energy = compute_energy(system, positions, force_group={30})
- system, nonbonded_force = restore_system(reference_system) # Restore alchemical electrostatics
- turn_off(nonbonded_force, electrostatics=True, only_atoms=nonalchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
+ turn_off(system, electrostatics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_electro = compute_energy(system, positions)
aa_reciprocal_energy = compute_energy(system, positions, force_group={30})
- turn_off(nonbonded_force, electrostatics=True)
+ turn_off(system, electrostatics=True)
tot_energy_no_particle_electro = compute_energy(system, positions)
na_reciprocal_energy = tot_reciprocal_energy - nn_reciprocal_energy - aa_reciprocal_energy
@@ -316,13 +284,13 @@ def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms):
# -----------------------------------------------------
# Compute contributions from exceptions sterics
- system, nonbonded_force = restore_system(reference_system) # Restore particle interactions
- turn_off(nonbonded_force, sterics=True, exceptions=True, only_atoms=alchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore particle interactions
+ turn_off(system, sterics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_sterics = compute_energy(system, positions)
- system, nonbonded_force = restore_system(reference_system) # Restore alchemical sterics
- turn_off(nonbonded_force, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore alchemical sterics
+ turn_off(system, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_sterics = compute_energy(system, positions)
- turn_off(nonbonded_force, sterics=True, exceptions=True)
+ turn_off(system, sterics=True, exceptions=True)
tot_energy_no_exception_sterics = compute_energy(system, positions)
tot_exception_sterics = tot_energy - tot_energy_no_exception_sterics
@@ -331,13 +299,13 @@ def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms):
na_exception_sterics = tot_exception_sterics - nn_exception_sterics - aa_exception_sterics
# Compute contributions from exceptions electrostatics
- system, nonbonded_force = restore_system(reference_system) # Restore exceptions sterics
- turn_off(nonbonded_force, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore exceptions sterics
+ turn_off(system, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_electro = compute_energy(system, positions)
- system, nonbonded_force = restore_system(reference_system) # Restore alchemical electrostatics
- turn_off(nonbonded_force, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)
+ system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
+ turn_off(system, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_electro = compute_energy(system, positions)
- turn_off(nonbonded_force, electrostatics=True, exceptions=True)
+ turn_off(system, electrostatics=True, exceptions=True)
tot_energy_no_exception_electro = compute_energy(system, positions)
tot_exception_electro = tot_energy - tot_energy_no_exception_electro
@@ -490,40 +458,6 @@ def compare_system_energies(reference_system, alchemical_system, alchemical_regi
raise Exception(err_msg.format(delta / unit.kilocalories_per_mole, MAX_DELTA / unit.kilocalories_per_mole))
-def compare_system_forces(reference_system, alchemical_system, positions, name="", platform=None):
- """Check that the forces of reference and modified systems are close.
-
- Parameters
- ---------
- reference_system : simtk.openmm.System
- Reference System
- alchemical_system : simtk.openmm.System
- System to compare to reference
- positions : simtk.unit.Quantity of shape [nparticles,3] with units of distance
- The particle positions to use
- name : str, optional, default=""
- System name to use for debugging.
- platform : simtk.openmm.Platform, optional, default=None
- If specified, use this platform
-
- """
- # Compute forces
- reference_force = compute_forces(reference_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
- alchemical_force = compute_forces(alchemical_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
-
- # Check that error is small.
- def magnitude(vec):
- return np.sqrt(np.mean(np.sum(vec**2, axis=1)))
-
- relative_error = magnitude(alchemical_force - reference_force) / magnitude(reference_force)
- if np.any(np.abs(relative_error) > MAX_FORCE_RELATIVE_ERROR):
- print("========")
- err_msg = ("Maximum allowable relative force error exceeded (was {:.8f}; allowed {:.8f}).\n"
- "alchemical_force = {:.8f}, reference_force = {:.8f}, difference = {:.8f}")
- raise Exception(err_msg.format(relative_error, MAX_FORCE_RELATIVE_ERROR, magnitude(alchemical_force),
- magnitude(reference_force), magnitude(alchemical_force-reference_force)))
-
-
def check_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""Compare full and alchemically-modified system energies by energy component.
@@ -660,7 +594,7 @@ def check_interacting_energy_components(reference_system, alchemical_system, alc
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce',
'GBSAOBCForce', 'CustomGBForce']:
alchemical_forces_energies = [energy for label, energy in energy_components.items() if force_name in label]
- reference_force_energy = compute_energy_force(reference_system, positions, force_name)
+ reference_force_energy = compute_force_energy(reference_system, positions, force_name)
# There should be no force in the alchemical system if force_name is missing from the reference
if reference_force_energy is None:
@@ -795,7 +729,7 @@ def check_noninteracting_energy_components(reference_system, alchemical_system,
if i not in alchemical_regions.alchemical_atoms]
# Compute reference force energy.
- reference_force_energy = compute_energy_force(system, non_alchemical_positions, force_name)
+ reference_force_energy = compute_force_energy(system, non_alchemical_positions, force_name)
assert_almost_equal(reference_force_energy, alchemical_energy,
'reference {}, alchemical {}'.format(reference_force_energy, alchemical_energy))
@@ -1224,12 +1158,7 @@ class TestAbsoluteAlchemicalFactory(object):
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
- switched_rf_factory = AbsoluteAlchemicalFactory(alchemical_rf_treatment='switched')
- shifted_rf_factory = AbsoluteAlchemicalFactory(alchemical_rf_treatment='shifted')
-
- # Create reference versions of all rf-containing systems with their switched counterparts with c_rf = 0
- for (name, testsystem) in cls.test_systems.items():
- setattr(testsystem, 'modified_rf_system', switched_rf_factory.replace_reaction_field(testsystem.system))
+ factory = AbsoluteAlchemicalFactory(alchemical_rf_treatment='switched')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
@@ -1268,20 +1197,24 @@ class TestAbsoluteAlchemicalFactory(object):
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
- # Also store shifted rf alchemical system for energy component comparisons
- shifted_rf_alchemical_system = shifted_rf_factory.create_alchemical_system(test_system.system, region)
- setattr(test_system, 'shifted_rf_alchemical_system', shifted_rf_alchemical_system)
+ # Pre-generate alchemical system.
+ alchemical_system = factory.create_alchemical_system(test_system.system, region)
- # Pre-generate alchemical system with switched rf
- alchemical_system = switched_rf_factory.create_alchemical_system(test_system.system, region)
+ # Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, region)
-
n_test_cases += 1
+ # If the test system uses reaction field, replace the reaction field
+ # of the reference system to allow comparisons.
+ nonbonded_force = forces.find_nonbonded_force(test_system.system)
+ if nonbonded_force.getNonbondedMethod() == openmm.NonbondedForce.CutoffPeriodic:
+ forcefactories.replace_reaction_field(test_system.system, return_copy=False,
+ switch_width=factory.switch_width)
+
def test_fully_interacting_energy(self):
"""Compare the energies of reference and fully interacting alchemical system."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
- f = partial(compare_system_energies, test_system.modified_rf_system,
+ f = partial(compare_system_energies, test_system.system,
alchemical_system, alchemical_region, test_system.positions)
f.description = "Testing fully interacting energy of {}".format(test_name)
yield f
@@ -1294,25 +1227,6 @@ class TestAbsoluteAlchemicalFactory(object):
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
- def test_replace_reaction_field(self):
- """Check that replacing reaction-field electrostatics with Custom*Force
- yields minimal force differences with original system.
-
- Note that we cannot test for energy consistency or energy overlap because
- which atoms are within the cutoff will cause energy difference to vary wildly.
-
- """
- platform = openmm.Platform.getPlatformByName('Reference')
- factory = AbsoluteAlchemicalFactory(alchemical_rf_treatment='switched', switch_width=None)
- for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
- if (test_system.system.getNumForces() != test_system.modified_rf_system.getNumForces()):
- modified_rf_system = factory.replace_reaction_field(test_system.system)
- # Make sure positions are not at minimum
- positions = generate_new_positions(test_system.system, test_system.positions)
- f = partial(compare_system_forces, test_system.system, modified_rf_system, positions, name=test_name, platform=platform)
- f.description = "Testing replace_reaction_field on system {}".format(test_name)
- yield f
-
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
@@ -1322,10 +1236,8 @@ class TestAbsoluteAlchemicalFactory(object):
if 'Explicit' in test_name]
for test_name in test_cases_names:
test_system, alchemical_system, alchemical_region = self.test_cases[test_name]
- # We have to compare shifted rf system with original system because test cannot handle
- # re-coded reaction-field forces with c_rf = 0
- f = partial(check_interacting_energy_components, test_system.system, test_system.shifted_rf_alchemical_system,
- alchemical_region, test_system.positions)
+ f = partial(check_interacting_energy_components, test_system.system, alchemical_system,
+ alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
@@ -1347,10 +1259,12 @@ class TestAbsoluteAlchemicalFactory(object):
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
- f = partial(compare_system_energies, test_system.modified_rf_system, alchemical_system, alchemical_region, test_system.positions)
+ f = partial(compare_system_energies, test_system.system, alchemical_system,
+ alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
- f = partial(check_noninteracting_energy_components, alchemical_system, alchemical_region, test_system.positions)
+ f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
+ alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
@@ -1364,7 +1278,7 @@ class TestAbsoluteAlchemicalFactory(object):
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
- f = partial(overlap_check, test_system.modified_rf_system, alchemical_system, test_system.positions,
+ f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for {}".format(test_name)
yield f
diff --git a/openmmtools/tests/test_cache.py b/openmmtools/tests/test_cache.py
index 8704183..ef8a8ef 100644
--- a/openmmtools/tests/test_cache.py
+++ b/openmmtools/tests/test_cache.py
@@ -260,19 +260,9 @@ class TestContextCache(object):
assert state1.is_context_compatible(context)
assert isinstance(integrator, type(self.verlet_2fs)), type(integrator)
- # When it has a choice, ContextCache picks the same context
- # in consecutive calls with same thermodynamic state.
- # First add another integrator so that ContextCache has 2 possible options.
- assert type(self.langevin_2fs_310k) is not type(default_integrator) # test precondition
- cache.get_context(state1, copy.deepcopy(self.langevin_2fs_310k))
- assert len(cache) == 3
- context, integrator = cache.get_context(state1)
- for i in range(5): # 5 attempts to make the test fail
- assert cache.get_context(state1)[0] is context
-
# With an incompatible state, a new Context is created.
cache.get_context(state2)
- assert len(cache) == 4
+ assert len(cache) == 3
def test_cache_capacity_ttl(self):
"""Check that the cache capacity and time_to_live work as expected."""
diff --git a/openmmtools/tests/test_forces.py b/openmmtools/tests/test_forces.py
new file mode 100644
index 0000000..d076da6
--- /dev/null
+++ b/openmmtools/tests/test_forces.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+
+# =============================================================================
+# MODULE DOCSTRING
+# =============================================================================
+
+"""
+Test Force classes in forces.py.
+
+"""
+
+# =============================================================================
+# GLOBAL IMPORTS
+# =============================================================================
+
+from functools import partial
+
+import numpy as np
+
+from openmmtools.forcefactories import *
+from openmmtools import testsystems
+
+
+# =============================================================================
+# CONSTANTS
+# =============================================================================
+
+MAX_FORCE_RELATIVE_ERROR = 1.0e-6 # maximum allowable relative force error
+GLOBAL_FORCE_UNIT = unit.kilojoules_per_mole / unit.nanometers # controls printed units
+GLOBAL_FORCES_PLATFORM = None # This is used in every calculation.
+
+
+# =============================================================================
+# TESTING UTILITIES
+# =============================================================================
+
+def create_context(system, integrator, platform=None):
+ """Create a Context.
+
+ If platform is None, GLOBAL_FORCES_PLATFORM is used.
+
+ """
+ if platform is None:
+ platform = GLOBAL_FORCES_PLATFORM
+ if platform is not None:
+ context = openmm.Context(system, integrator, platform)
+ else:
+ context = openmm.Context(system, integrator)
+ return context
+
+
+# =============================================================================
+# UTILITY FUNCTIONS
+# =============================================================================
+
+def compute_forces(system, positions, platform=None, force_group=-1):
+ """Compute forces of the system in the given positions.
+
+ Parameters
+ ----------
+ platform : simtk.openmm.Platform or None, optional
+ If None, the global GLOBAL_FORCES_PLATFORM will be used.
+ force_group : int flag or set of int, optional
+ Passed to the groups argument of Context.getState().
+
+ """
+ timestep = 1.0 * unit.femtoseconds
+ integrator = openmm.VerletIntegrator(timestep)
+ context = create_context(system, integrator, platform)
+ context.setPositions(positions)
+ state = context.getState(getForces=True, groups=force_group)
+ forces = state.getForces(asNumpy=True)
+ del context, integrator, state
+ return forces
+
+
+def compare_system_forces(reference_system, alchemical_system, positions, name="", platform=None):
+ """Check that the forces of reference and modified systems are close.
+
+ Parameters
+ ---------
+ reference_system : simtk.openmm.System
+ Reference System
+ alchemical_system : simtk.openmm.System
+ System to compare to reference
+ positions : simtk.unit.Quantity of shape [nparticles,3] with units of distance
+ The particle positions to use
+ name : str, optional, default=""
+ System name to use for debugging.
+ platform : simtk.openmm.Platform, optional, default=None
+ If specified, use this platform
+
+ """
+ # Compute forces
+ reference_force = compute_forces(reference_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
+ alchemical_force = compute_forces(alchemical_system, positions, platform=platform) / GLOBAL_FORCE_UNIT
+
+ # Check that error is small.
+ def magnitude(vec):
+ return np.sqrt(np.mean(np.sum(vec**2, axis=1)))
+
+ relative_error = magnitude(alchemical_force - reference_force) / magnitude(reference_force)
+ if np.any(np.abs(relative_error) > MAX_FORCE_RELATIVE_ERROR):
+ err_msg = ("Maximum allowable relative force error exceeded (was {:.8f}; allowed {:.8f}).\n"
+ "alchemical_force = {:.8f}, reference_force = {:.8f}, difference = {:.8f}")
+ raise Exception(err_msg.format(relative_error, MAX_FORCE_RELATIVE_ERROR, magnitude(alchemical_force),
+ magnitude(reference_force), magnitude(alchemical_force-reference_force)))
+
+
+def generate_new_positions(system, positions, platform=None, nsteps=50):
+ """Generate new positions by taking a few steps from the old positions.
+
+ Parameters
+ ----------
+ platform : simtk.openmm.Platform or None, optional
+ If None, the global GLOBAL_FORCES_PLATFORM will be used.
+ nsteps : int, optional, default=50
+ Number of steps of dynamics to take.
+
+ Returns
+ -------
+ new_positions : simtk.unit.Quantity of shape [nparticles,3] with units compatible with distance
+ New positions
+
+ """
+ temperature = 300 * unit.kelvin
+ collision_rate = 90 / unit.picoseconds
+ timestep = 1.0 * unit.femtoseconds
+ integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
+ context = create_context(system, integrator, platform)
+ context.setPositions(positions)
+ integrator.step(nsteps)
+ new_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
+ del context, integrator
+ return new_positions
+
+
+# =============================================================================
+# TEST REPLACE REACTION FIELD
+# =============================================================================
+
+def test_replace_reaction_field():
+ """Check that replacing reaction-field electrostatics with Custom*Force
+ yields minimal force differences with original system.
+
+ Note that we cannot test for energy consistency or energy overlap because
+ which atoms are within the cutoff will cause energy difference to vary wildly.
+
+ """
+ test_cases = [
+ testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic),
+ testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
+ ]
+ platform = openmm.Platform.getPlatformByName('Reference')
+ for test_system in test_cases:
+ test_name = test_system.__class__.__name__
+
+ # Replace reaction field.
+ modified_rf_system = replace_reaction_field(test_system.system, switch_width=None)
+
+ # Make sure positions are not at minimum.
+ positions = generate_new_positions(test_system.system, test_system.positions)
+
+ # Test forces.
+ f = partial(compare_system_forces, test_system.system, modified_rf_system, positions,
+ name=test_name, platform=platform)
+ f.description = "Testing replace_reaction_field on system {}".format(test_name)
+ yield f
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.11 | {
"env_vars": null,
"env_yml_path": [
"docs/environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": true,
"packages": "environment.yml",
"pip_packages": [
"nose",
"pymbar",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
attrs==22.2.0
Babel @ file:///tmp/build/80754af9/babel_1620871417480/work
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
colorama @ file:///tmp/build/80754af9/colorama_1607707115595/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
Cython @ file:///tmp/build/80754af9/cython_1626256602391/work
docutils @ file:///tmp/build/80754af9/docutils_1620827982266/work
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
imagesize @ file:///tmp/build/80754af9/imagesize_1637939814114/work
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig==1.1.1
Jinja2 @ file:///opt/conda/conda-bld/jinja2_1647436528585/work
MarkupSafe @ file:///tmp/build/80754af9/markupsafe_1621528150516/work
nose==1.3.7
numexpr==2.8.1
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
numpydoc @ file:///tmp/build/80754af9/numpydoc_1605117425582/work
OpenMM==7.4.2
-e git+https://github.com/choderalab/openmmtools.git@9e299792b6ae45acb1b5cd6a4033a4a15df1dd75#egg=openmmtools
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
ParmEd==3.2.0
pluggy==1.0.0
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
Pygments @ file:///opt/conda/conda-bld/pygments_1644249106324/work
pymbar==4.0.3
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
pytz==2021.3
requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1637937080595/work
Sphinx @ file:///opt/conda/conda-bld/sphinx_1643644169832/work
sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
sphinxcontrib-htmlhelp @ file:///tmp/build/80754af9/sphinxcontrib-htmlhelp_1623945626792/work
sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
sphinxcontrib-serializinghtml @ file:///tmp/build/80754af9/sphinxcontrib-serializinghtml_1624451540180/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: openmmtools
channels:
- omnia
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- alabaster=0.7.12=pyhd3eb1b0_0
- babel=2.9.1=pyhd3eb1b0_0
- blas=1.0=openblas
- brotlipy=0.7.0=py36h27cfd23_1003
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- colorama=0.4.4=pyhd3eb1b0_0
- cryptography=35.0.0=py36hd23ed53_0
- cython=0.29.24=py36h295c915_0
- docutils=0.17.1=py36h06a4308_1
- fftw3f=3.3.4=2
- idna=3.3=pyhd3eb1b0_0
- imagesize=1.3.0=pyhd3eb1b0_0
- importlib-metadata=4.8.1=py36h06a4308_0
- jinja2=3.0.3=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libstdcxx-ng=11.2.0=h1234567_1
- markupsafe=2.0.1=py36h27cfd23_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- numpydoc=1.1.0=pyhd3eb1b0_1
- openmm=7.4.2=py36_cuda101_rc_1
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pandas=1.1.5=py36ha9443f7_0
- parmed=3.2.0=py36_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pygments=2.11.2=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pytz=2021.3=pyhd3eb1b0_0
- readline=8.2=h5eee18b_0
- requests=2.27.1=pyhd3eb1b0_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- snowballstemmer=2.2.0=pyhd3eb1b0_0
- sphinx=4.4.0=pyhd3eb1b0_0
- sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0
- sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0
- sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0
- sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- typing_extensions=4.1.1=pyh06a4308_0
- urllib3=1.26.8=pyhd3eb1b0_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- iniconfig==1.1.1
- nose==1.3.7
- numexpr==2.8.1
- pluggy==1.0.0
- py==1.11.0
- pymbar==4.0.3
- pytest==7.0.1
- tomli==1.2.3
prefix: /opt/conda/envs/openmmtools
| [
"openmmtools/tests/test_alchemy.py::test_resolve_alchemical_region",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_constructor",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_from_system_constructor",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_equality_operator",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_apply_to_system",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_check_system_consistency",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_apply_to_context",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_standardize_system",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_alchemical_functions",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_constructor_compound_state",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_lambda_properties_compound_state",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_set_system_compound_state",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_method_compatibility_compound_state",
"openmmtools/tests/test_alchemy.py::TestAlchemicalState::test_serialization",
"openmmtools/tests/test_cache.py::test_lru_cache_cache_entry_unpacking",
"openmmtools/tests/test_cache.py::test_lru_cache_maximum_capacity",
"openmmtools/tests/test_cache.py::test_lru_cache_eliminate_least_recently_used",
"openmmtools/tests/test_cache.py::test_lru_cache_access_to_live",
"openmmtools/tests/test_cache.py::test_lru_cache_capacity_property",
"openmmtools/tests/test_cache.py::test_lru_cache_time_to_live_property",
"openmmtools/tests/test_cache.py::TestContextCache::test_copy_integrator_state",
"openmmtools/tests/test_cache.py::TestContextCache::test_generate_compatible_context_key",
"openmmtools/tests/test_cache.py::TestContextCache::test_generate_incompatible_context_key",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_compatible_context",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_incompatible_context",
"openmmtools/tests/test_cache.py::TestContextCache::test_get_context_any_integrator",
"openmmtools/tests/test_cache.py::TestContextCache::test_cache_capacity_ttl",
"openmmtools/tests/test_cache.py::TestContextCache::test_platform_property"
]
| []
| []
| []
| MIT License | 1,453 | [
"docs/forces.rst",
"openmmtools/forcefactories.py",
"openmmtools/cache.py",
"docs/forcefactories.rst",
"openmmtools/forces.py",
"openmmtools/alchemy.py",
"docs/index.rst"
]
| [
"docs/forces.rst",
"openmmtools/forcefactories.py",
"openmmtools/cache.py",
"docs/forcefactories.rst",
"openmmtools/forces.py",
"openmmtools/alchemy.py",
"docs/index.rst"
]
|
|
google__mobly-258 | c9ba28477626c6f7d5365bec019646f915a5bd2d | 2017-07-12 18:30:24 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | diff --git a/mobly/controllers/android_device_lib/callback_handler.py b/mobly/controllers/android_device_lib/callback_handler.py
index 4207f47..1ab67d8 100644
--- a/mobly/controllers/android_device_lib/callback_handler.py
+++ b/mobly/controllers/android_device_lib/callback_handler.py
@@ -83,13 +83,14 @@ class CallbackHandler(object):
(timeout, MAX_TIMEOUT))
timeout *= 1000 # convert to milliseconds for java side
try:
- raw_event = self._event_client.eventWaitAndGet(self._id,
- event_name, timeout)
+ raw_event = self._event_client.eventWaitAndGet(
+ self._id, event_name, timeout)
except Exception as e:
if 'EventSnippetException: timeout.' in str(e):
raise TimeoutError(
- 'Timeout waiting for event "%s" triggered by %s (%s).' %
- (event_name, self._method_name, self._id))
+ 'Timed out after waiting %ss for event "%s" triggered by'
+ ' %s (%s).' % (timeout, event_name, self._method_name,
+ self._id))
raise
return snippet_event.from_dict(raw_event)
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index 3d85e40..f7f473b 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -24,15 +24,27 @@ _INSTRUMENTATION_RUNNER_PACKAGE = (
'com.google.android.mobly.snippet.SnippetRunner')
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
-_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
+_LAUNCH_CMD_V0 = ('%s am instrument -w -e action start -e port %s %s/' +
_INSTRUMENTATION_RUNNER_PACKAGE)
_LAUNCH_CMD_V1 = (
- 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+ '%s am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
_STOP_CMD = (
'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+# Tests that use UiAutomation require the shell session to be maintained while
+# the test is in progress. However, this requirement does not hold for tests
+# that deal with device USB disconnection (once the device disconnects, the
+# shell session that started the instrumentation ends, and UiAutomation fails
+# with the error "UiAutomation not connected"). To keep the shell session and
+# redirect stdin/stdout/stderr, use "setsid" or "nohup" while launching the
+# instrumentation test. Because these commands may not be available on every
+# Android system, use them only if they exist.
+_SETSID_COMMAND = 'setsid'
+
+_NOHUP_COMMAND = 'nohup'
+
# Maximum time to wait for a v0 snippet to start on the device (10 minutes).
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
_APP_START_WAIT_TIME_V0 = 10 * 60
@@ -60,7 +72,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
def __init__(self, package, adb_proxy, log=logging.getLogger()):
"""Initializes a SnippetClient.
-
+
Args:
package: (str) The package name of the apk where the snippets are
defined.
@@ -77,13 +89,14 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
"""Overrides superclass. Launches a snippet app and connects to it."""
self._check_app_installed()
+ persists_shell_cmd = self._get_persist_command()
# Try launching the app with the v1 protocol. If that fails, fall back
# to v0 for compatibility. Use info here so people know exactly what's
# happening here, which is helpful since they need to create their own
# instrumentations and manifest.
self.log.info('Launching snippet apk %s with protocol v1',
self.package)
- cmd = _LAUNCH_CMD_V1 % self.package
+ cmd = _LAUNCH_CMD_V1 % (persists_shell_cmd, self.package)
start_time = time.time()
self._proc = self._do_start_app(cmd)
@@ -106,7 +119,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Reuse the host port as the device port in v0 snippet. This isn't
# safe in general, but the protocol is deprecated.
self.device_port = self.host_port
- cmd = _LAUNCH_CMD_V0 % (self.device_port, self.package)
+ cmd = _LAUNCH_CMD_V0 % (persists_shell_cmd, self.device_port, self.package)
self._proc = self._do_start_app(cmd)
self._connect_to_v0()
self._launch_version = 'v0'
@@ -291,3 +304,17 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
return line
self.log.debug('Discarded line from instrumentation output: "%s"',
line)
+
+ def _get_persist_command(self):
+ """Check availability and return path of command if available."""
+ for command in [_SETSID_COMMAND, _NOHUP_COMMAND]:
+ try:
+ if command in self._adb.shell('which %s' % command):
+ return command
+ except adb.AdbError:
+ continue
+ self.log.warning('No %s and %s commands available to launch instrument '
+ 'persistently, tests that depend on UiAutomator and '
+ 'at the same time perform USB disconnection may fail',
+ _SETSID_COMMAND, _NOHUP_COMMAND)
+ return ''
| Exceptions from `CallbackHandler` should include timeout value
Right now some timeout exceptions thrown by `CallbackHandler` do not include how long the timeout was, making debugging more difficult. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index e85551a..b3ccf43 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -553,6 +553,7 @@ class BaseTestClass(object):
# Skip all tests peacefully.
e.details = 'setup_class aborted due to: %s' % e.details
self._skip_remaining_tests(e)
+ self._safe_exec_func(self.teardown_class)
return self.results
except Exception as e:
# Setup class failed for unknown reasons.
@@ -564,9 +565,8 @@ class BaseTestClass(object):
self._exec_procedure_func(self._on_fail, class_record)
self.results.add_class_error(class_record)
self._skip_remaining_tests(e)
- return self.results
- finally:
self._safe_exec_func(self.teardown_class)
+ return self.results
# Run tests in order.
try:
for test_name, test_method in tests:
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index 725dcda..db615dd 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -38,7 +38,6 @@ class SomeError(Exception):
class BaseTestTest(unittest.TestCase):
-
def setUp(self):
self.mock_test_cls_configs = config_parser.TestRunConfig()
self.mock_test_cls_configs.log_path = '/tmp'
@@ -566,6 +565,25 @@ class BaseTestTest(unittest.TestCase):
("Error 0, Executed 0, Failed 0, Passed 0, "
"Requested 3, Skipped 3"))
+ def test_setup_and_teardown_execution_count(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def test_func(self):
+ pass
+
+ def test_func2(self):
+ pass
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.setup_class = mock.Mock()
+ bt_cls.teardown_class = mock.Mock()
+ bt_cls.setup_test = mock.Mock()
+ bt_cls.teardown_test = mock.Mock()
+ bt_cls.run()
+ self.assertEqual(bt_cls.setup_class.call_count, 1)
+ self.assertEqual(bt_cls.teardown_class.call_count, 1)
+ self.assertEqual(bt_cls.setup_test.call_count, 2)
+ self.assertEqual(bt_cls.teardown_test.call_count, 2)
+
def test_abort_class_in_test(self):
class MockBaseTest(base_test.BaseTestClass):
def test_1(self):
diff --git a/tests/mobly/controllers/android_device_lib/callback_handler_test.py b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
index a701d51..f288ef3 100755
--- a/tests/mobly/controllers/android_device_lib/callback_handler_test.py
+++ b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
@@ -34,6 +34,7 @@ MOCK_RAW_EVENT = {
class CallbackHandlerTest(unittest.TestCase):
"""Unit tests for mobly.controllers.android_device_lib.callback_handler.
"""
+
def test_timeout_value(self):
self.assertGreaterEqual(jsonrpc_client_base._SOCKET_READ_TIMEOUT,
callback_handler.MAX_TIMEOUT)
@@ -64,9 +65,9 @@ class CallbackHandlerTest(unittest.TestCase):
event_client=mock_event_client,
ret_value=None,
method_name=None)
- expected_msg = 'Timeout waiting for event "ha" .*'
+ expected_msg = 'Timed out after waiting .*s for event "ha" .*'
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitAndGet('ha')
def test_wait_for_event(self):
@@ -101,7 +102,7 @@ class CallbackHandlerTest(unittest.TestCase):
return False
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 010064c..beb9262 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -18,6 +18,7 @@ from builtins import bytes
import mock
from future.tests.base import unittest
+from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import jsonrpc_client_base
from mobly.controllers.android_device_lib import snippet_client
from tests.lib import jsonrpc_client_test_base
@@ -51,6 +52,8 @@ class MockAdbProxy(object):
return bytes('instrumentation:{p}/{r} (target={p})'.format(
p=MOCK_PACKAGE_NAME,
r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE), 'utf-8')
+ elif 'which' in params:
+ return ''
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
@@ -175,6 +178,73 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):
client.start_app_and_connect()
self.assertEqual(123, client.device_port)
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._do_start_app')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._check_app_installed')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._read_protocol_line')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._connect_to_v1')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'utils.get_available_host_port')
+ def test_snippet_start_app_and_connect_v1_persistent_session(
+ self, mock_get_port, mock_connect_to_v1, mock_read_protocol_line,
+ mock_check_app_installed, mock_do_start_app):
+
+ def _mocked_shell(arg):
+ if 'setsid' in arg:
+ raise adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code')
+ else:
+ return 'nohup'
+
+ mock_get_port.return_value = 123
+ mock_read_protocol_line.side_effect = [
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ ]
+
+ # Test 'setsid' exists
+ client = self._make_client()
+ client._adb.shell = mock.Mock(return_value='setsid')
+ client.start_app_and_connect()
+ cmd_setsid = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._SETSID_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls(mock.call(cmd_setsid))
+
+ # Test 'setsid' does not exist, but 'nohup' exists
+ client = self._make_client()
+ client._adb.shell = _mocked_shell
+ client.start_app_and_connect()
+ cmd_nohup = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._NOHUP_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup)
+ ])
+
+ # Test both 'setsid' and 'nohup' do not exist
+ client._adb.shell = mock.Mock(
+ side_effect=adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code'))
+ client = self._make_client()
+ client.start_app_and_connect()
+ cmd_not_persist = ' am instrument -w -e action start %s/%s' % (
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup),
+ mock.call(cmd_not_persist)
+ ])
+
@mock.patch('socket.create_connection')
@mock.patch('mobly.controllers.android_device_lib.snippet_client.'
'utils.start_standing_subprocess')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y adb python3-setuptools"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@c9ba28477626c6f7d5365bec019646f915a5bd2d#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_and_get_timeout",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_persistent_session"
]
| []
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_event_dict_to_snippet_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_timeout_value",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event_negative",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"
]
| []
| Apache License 2.0 | 1,454 | [
"mobly/controllers/android_device_lib/snippet_client.py",
"mobly/controllers/android_device_lib/callback_handler.py"
]
| [
"mobly/controllers/android_device_lib/snippet_client.py",
"mobly/controllers/android_device_lib/callback_handler.py"
]
|
|
elastic__elasticsearch-py-618 | 0397527d12bcb43274ce9054111c5f7f673a7ad6 | 2017-07-13 02:52:29 | a03504cbb2af34490e31c7b912165028a1535381 | davidt99: @jmcarp Any plan to fix the review comments? Most of the driver requests (if not all) doesn't state content type, resulting warning logs in elastic 5.5.
The header support you wrote will allow adding a content type to all the requests, thus fixing the warning.
HonzaKral: @davidt99 All requests are annotated with the content-type accepted by elasticsearch since d862c8a (released in `5.2`). You can also set headers globally by specifying `headers={'custom': 'header'}` when instantiating `Elasticsearch`.
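For illustration, a minimal sketch of the global-headers approach mentioned in the comment above; the host URL and header value are placeholders, not from this thread:

```python
from elasticsearch import Elasticsearch

# Per the comment above, a `headers` dict passed when instantiating the
# client is applied to every request made through it. The host and the
# header value below are illustrative placeholders only.
es = Elasticsearch(['http://localhost:9200'],
                   headers={'custom': 'header'})

# Individual API calls then need no extra header handling, e.g.:
# es.info()
```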
davidt99: @HonzaKral Oh, I was using 5.1 and looked for similar issue. Thanks.
jmcarp: I cleaned this up based on your suggestions @HonzaKral; let me know if this needs more revisions. I'm not sure why the tests are failing, but it looks like they're failing in the same way as `master`.
jmcarp: I rebased on master and dropped the deprecated methods, and reverted whitespace changes. This is ready for another look when you have time @HonzaKral.
fxdgear: This LGTM. Can you please rebase off master and push again? I had to change the version of ES that travis uses. | diff --git a/elasticsearch/client/__init__.py b/elasticsearch/client/__init__.py
index 59858549..834f1d86 100644
--- a/elasticsearch/client/__init__.py
+++ b/elasticsearch/client/__init__.py
@@ -1127,7 +1127,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('POST', _make_path(index,
- doc_type, '_bulk'), params=params, body=self._bulk_body(body))
+ doc_type, '_bulk'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('max_concurrent_searches', 'pre_filter_shard_size',
'search_type', 'typed_keys')
@@ -1159,7 +1160,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index,
- doc_type, '_msearch'), params=params, body=self._bulk_body(body))
+ doc_type, '_msearch'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('field_statistics', 'fields', 'offsets', 'parent', 'payloads',
'positions', 'preference', 'realtime', 'routing', 'term_statistics',
@@ -1363,7 +1365,8 @@ class Elasticsearch(object):
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request('GET', _make_path(index, doc_type,
- '_msearch', 'template'), params=params, body=self._bulk_body(body))
+ '_msearch', 'template'), params=params, body=self._bulk_body(body),
+ headers={'content-type': 'application/x-ndjson'})
@query_params('allow_no_indices', 'expand_wildcards', 'fields',
'ignore_unavailable')
@@ -1387,3 +1390,4 @@ class Elasticsearch(object):
"""
return self.transport.perform_request('GET', _make_path(index,
'_field_caps'), params=params, body=body)
+
diff --git a/elasticsearch/connection/http_requests.py b/elasticsearch/connection/http_requests.py
index 59dd381c..b98e7772 100644
--- a/elasticsearch/connection/http_requests.py
+++ b/elasticsearch/connection/http_requests.py
@@ -61,13 +61,13 @@ class RequestsHttpConnection(Connection):
warnings.warn(
'Connecting to %s using SSL with verify_certs=False is insecure.' % self.base_url)
- def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
+ def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
url = self.base_url + url
if params:
url = '%s?%s' % (url, urlencode(params or {}))
start = time.time()
- request = requests.Request(method=method, url=url, data=body)
+ request = requests.Request(method=method, headers=headers, url=url, data=body)
prepared_request = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(prepared_request.url, {}, None, None, None)
send_kwargs = {'timeout': timeout or self.timeout}
diff --git a/elasticsearch/connection/http_urllib3.py b/elasticsearch/connection/http_urllib3.py
index 7b4e6c79..62957ed2 100644
--- a/elasticsearch/connection/http_urllib3.py
+++ b/elasticsearch/connection/http_urllib3.py
@@ -91,7 +91,7 @@ class Urllib3HttpConnection(Connection):
self.pool = pool_class(host, port=port, timeout=self.timeout, maxsize=maxsize, **kw)
- def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
+ def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=(), headers=None):
url = self.url_prefix + url
if params:
url = '%s?%s' % (url, urlencode(params))
@@ -111,6 +111,9 @@ class Urllib3HttpConnection(Connection):
if not isinstance(method, str):
method = method.encode('utf-8')
+ if headers:
+ request_headers = dict(self.headers)
+ request_headers.update(headers or {})
response = self.pool.urlopen(method, url, body, retries=False, headers=self.headers, **kw)
duration = time.time() - start
raw_data = response.data.decode('utf-8')
diff --git a/elasticsearch/transport.py b/elasticsearch/transport.py
index dc8cd891..f876a945 100644
--- a/elasticsearch/transport.py
+++ b/elasticsearch/transport.py
@@ -255,7 +255,7 @@ class Transport(object):
if self.sniff_on_connection_fail:
self.sniff_hosts()
- def perform_request(self, method, url, params=None, body=None):
+ def perform_request(self, method, url, headers=None, params=None, body=None):
"""
Perform the actual request. Retrieve a connection from the connection
pool, pass all the information to it's perform_request method and
@@ -269,6 +269,8 @@ class Transport(object):
:arg method: HTTP method to use
:arg url: absolute url (without host) to target
+ :arg headers: dictionary of headers, will be handed over to the
+ underlying :class:`~elasticsearch.Connection` class
:arg params: dictionary of query parameters, will be handed over to the
underlying :class:`~elasticsearch.Connection` class for serialization
:arg body: body of the request, will be serializes using serializer and
@@ -309,7 +311,7 @@ class Transport(object):
connection = self.get_connection()
try:
- status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout)
+ status, headers, data = connection.perform_request(method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
except TransportError as e:
if method == 'HEAD' and e.status_code == 404:
| Set `application/x-ndjson` content type on bulk requests
As of elastic 5.x, requests to `/_bulk` should set `Content-Type` to `application/x-ndjson`. If not, elastic logs a warning. It looks like this library defaults to `application/json`. To fix, I'm thinking we should accept an optional dict of headers at `https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/connection/http_urllib3.py#L94`. If that sounds reasonable, I'd be happy to submit a patch.
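As a rough illustration of the idea (a minimal sketch only — `merge_headers` is a hypothetical helper, not part of the library), a per-request headers dict could be overlaid on the connection's defaults before the HTTP call:

```python
def merge_headers(default_headers, request_headers=None):
    """Overlay per-request headers on top of connection-level defaults."""
    merged = dict(default_headers or {})
    merged.update(request_headers or {})
    return merged

# A bulk call could then opt in to the newline-delimited JSON type:
print(merge_headers({'content-type': 'application/json'},
                    {'content-type': 'application/x-ndjson'}))
# -> {'content-type': 'application/x-ndjson'}
```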
cc @cnelson @wjwoodson | elastic/elasticsearch-py | diff --git a/test_elasticsearch/test_connection.py b/test_elasticsearch/test_connection.py
index b2d84996..e4e0ae63 100644
--- a/test_elasticsearch/test_connection.py
+++ b/test_elasticsearch/test_connection.py
@@ -104,6 +104,13 @@ class TestRequestsConnection(TestCase):
self.assertEquals('GET', request.method)
self.assertEquals(None, request.body)
+ def test_merge_headers(self):
+ con = self._get_mock_connection(connection_params={'headers': {'h1': 'v1', 'h2': 'v2'}})
+ req = self._get_request(con, 'GET', '/', headers={'h2': 'v2p', 'h3': 'v3'})
+ self.assertEquals(req.headers['h1'], 'v1')
+ self.assertEquals(req.headers['h2'], 'v2p')
+ self.assertEquals(req.headers['h3'], 'v3')
+
def test_http_auth(self):
con = RequestsHttpConnection(http_auth='username:secret')
self.assertEquals(('username', 'secret'), con.session.auth)
diff --git a/test_elasticsearch/test_transport.py b/test_elasticsearch/test_transport.py
index 328325c1..50acb7fa 100644
--- a/test_elasticsearch/test_transport.py
+++ b/test_elasticsearch/test_transport.py
@@ -74,7 +74,7 @@ class TestTransport(TestCase):
t.perform_request('GET', '/', params={'request_timeout': 42})
self.assertEquals(1, len(t.get_connection().calls))
self.assertEquals(('GET', '/', {}, None), t.get_connection().calls[0][0])
- self.assertEquals({'timeout': 42, 'ignore': ()}, t.get_connection().calls[0][1])
+ self.assertEquals({'timeout': 42, 'ignore': (), 'headers': None}, t.get_connection().calls[0][1])
def test_send_get_body_as_source(self):
t = Transport([{}], send_get_body_as='source', connection_class=DummyConnection)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 4
} | 2.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[develop]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"coverage",
"mock",
"pyaml",
"nosexcover",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
docutils==0.18.1
-e git+https://github.com/elastic/elasticsearch-py.git@0397527d12bcb43274ce9054111c5f7f673a7ad6#egg=elasticsearch
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
mock==5.2.0
nose==1.3.7
nosexcover==1.0.11
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyaml==23.5.8
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.21.1
zipp==3.6.0
| name: elasticsearch-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- docutils==0.18.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- mock==5.2.0
- nose==1.3.7
- nosexcover==1.0.11
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyaml==23.5.8
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.21.1
- zipp==3.6.0
prefix: /opt/conda/envs/elasticsearch-py
| [
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_merge_headers",
"test_elasticsearch/test_transport.py::TestTransport::test_request_timeout_extracted_from_params_and_passed"
]
| []
| [
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_doesnt_use_https_if_not_specified",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth_list",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_http_auth_tuple",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_keep_alive_is_on_by_default",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_timeout_set",
"test_elasticsearch/test_connection.py::TestUrllib3Connection::test_uses_https_if_verify_certs_is_off",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_body_attached",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_conflict_error_is_returned_on_409",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_custom_http_auth_is_allowed",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_defaults",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_failed_request_logs_and_traces",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_head_with_404_doesnt_get_logged",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_attached",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_list",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_http_auth_tuple",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_not_found_error_is_returned_on_404",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_params_properly_encoded",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_repr",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_request_error_is_returned_on_400",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_success_logs_and_traces",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_timeout_set",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_url_prefix",
"test_elasticsearch/test_connection.py::TestRequestsConnection::test_uses_https_if_verify_certs_is_off",
"test_elasticsearch/test_transport.py::TestHostsInfoCallback::test_master_only_nodes_are_ignored",
"test_elasticsearch/test_transport.py::TestTransport::test_add_connection",
"test_elasticsearch/test_transport.py::TestTransport::test_body_bytes_get_passed_untouched",
"test_elasticsearch/test_transport.py::TestTransport::test_body_gets_encoded_into_bytes",
"test_elasticsearch/test_transport.py::TestTransport::test_body_surrogates_replaced_encoded_into_bytes",
"test_elasticsearch/test_transport.py::TestTransport::test_custom_connection_class",
"test_elasticsearch/test_transport.py::TestTransport::test_failed_connection_will_be_marked_as_dead",
"test_elasticsearch/test_transport.py::TestTransport::test_kwargs_passed_on_to_connection_pool",
"test_elasticsearch/test_transport.py::TestTransport::test_kwargs_passed_on_to_connections",
"test_elasticsearch/test_transport.py::TestTransport::test_request_will_fail_after_X_retries",
"test_elasticsearch/test_transport.py::TestTransport::test_resurrected_connection_will_be_marked_as_live_on_success",
"test_elasticsearch/test_transport.py::TestTransport::test_send_get_body_as_post",
"test_elasticsearch/test_transport.py::TestTransport::test_send_get_body_as_source",
"test_elasticsearch/test_transport.py::TestTransport::test_single_connection_uses_dummy_connection_pool",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_after_n_seconds",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_fail_triggers_sniffing_on_fail",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_start_fetches_and_uses_nodes_list",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_on_start_ignores_sniff_timeout",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_reuses_connection_instances_if_possible",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_uses_sniff_timeout",
"test_elasticsearch/test_transport.py::TestTransport::test_sniff_will_use_seed_connections"
]
| []
| Apache License 2.0 | 1,455 | [
"elasticsearch/connection/http_urllib3.py",
"elasticsearch/connection/http_requests.py",
"elasticsearch/client/__init__.py",
"elasticsearch/transport.py"
]
| [
"elasticsearch/connection/http_urllib3.py",
"elasticsearch/connection/http_requests.py",
"elasticsearch/client/__init__.py",
"elasticsearch/transport.py"
]
|
adamlwgriffiths__Pyrr-61 | ae9903fa3365d99fae588779df9e13d23fc3fc7d | 2017-07-13 07:06:17 | ae9903fa3365d99fae588779df9e13d23fc3fc7d | diff --git a/README.md b/README.md
index 734c277..b005037 100644
--- a/README.md
+++ b/README.md
@@ -188,10 +188,11 @@ Authors
-------
* [Adam Griffiths](https://github.com/adamlwgriffiths/).
- * [Chris Bates](https://github.com/chrsbats)
- * [Jakub Stasiak](https://github.com/jstasiak/).
+ * [Chris Bates](https://github.com/chrsbats/).
+ * [Jakub Stasiak](https://github.com/jstasiak/).
* [Bogdan Teleaga](https://github.com/bogdanteleaga/).
- * [Szabolcs Dombi](https://github.com/cprogrammer1994)
+ * [Szabolcs Dombi](https://github.com/cprogrammer1994/).
+ * [Korijn van Golen](https://github.com/Korijn/).
Contributions are welcome.
diff --git a/docs/source/info_contributing.rst b/docs/source/info_contributing.rst
index eaf4d9a..136e285 100644
--- a/docs/source/info_contributing.rst
+++ b/docs/source/info_contributing.rst
@@ -25,5 +25,6 @@ Developers and contributors include:
* `Adam Griffiths <https://github.com/adamlwgriffiths/>`_
* `Jakub Stasiak <https://github.com/jstasiak/>`_
+ * `Korijn van Golen <https://github.com/Korijn/>`_
Is your name left out? Post an issue in `Pyrr's bug tracker <https://github.com/adamlwgriffiths/Pyrr/issues/>`_ =)
diff --git a/pyrr/vector3.py b/pyrr/vector3.py
index 198792e..7e52c84 100644
--- a/pyrr/vector3.py
+++ b/pyrr/vector3.py
@@ -89,9 +89,43 @@ def generate_normals(v1, v2, v3, normalize_result=True):
b = v3 - v2
n = cross(b, a)
if normalize_result:
- normalize(n)
+ n = normalize(n)
return n
+def generate_vertex_normals(vertices, index, normalize_result=True):
+ """Generates a normal vector for each vertex.
+
+ The result is a normalized vector.
+
+ The index array should list the faces by indexing into the
+ vertices array. It is assumed the ordering in index is
+ counter-clockwise.
+
+ The vertices and index arrays are Nd arrays and must be 2d,
+ where the final axis is of size 3.
+
+ An example::
+ >>> vertices = numpy.array( [ [ 1.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0 ], [ 0.0, 1.0, 0.0 ] ] )
+ >>> index = numpy.array( [ [ 0, 2, 1 ] ] )
+ >>> vector.generate_vertex_normals( vertices, index )
+ array([[ 0., 0., 1.], [ 0., 0., 1.], [ 0., 0., 1.]])
+
+ :param numpy.array vertices: an 2d array with the final dimension
+ being size 3. (a vector)
+ :param numpy.array index: an Nd array with the final dimension
+ being size 3. (a vector)
+ :param boolean normalize_result: Specifies if the result should
+ be normalized before being returned.
+ """
+ v1, v2, v3 = np.rollaxis(vertices[index], axis=-2)
+ face_normals = generate_normals(v1, v2, v3, normalize_result=False)
+ vertex_normals = np.zeros_like(vertices)
+ for i in range(3):
+ np.add.at(vertex_normals, index[..., i], face_normals)
+ if normalize_result:
+ vertex_normals = normalize(vertex_normals)
+ return vertex_normals
+
class index:
#: The index of the X value within the vector
| generate_normals for vertices instead of faces
Your utility `pyrr.vector3.generate_normals` works wonders for generating face normals. However, for constructing vertex normals from those face normals, I needed to write some additional code.
```python
def generate_vertex_normals(vertices, index):
face_normals = pyrr.vector3.generate_normals(*np.rollaxis(vertices[index], axis=1))
vertex_normals = np.zeros_like(vertices)
for i in range(index.shape[-1]):
np.add.at(vertex_normals, index[:, i], face_normals)
return pyrr.vector3.normalize(vertex_normals)
```
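For context, a small usage sketch of the snippet above on a flat, two-triangle quad; it assumes `generate_vertex_normals` as defined above is in scope, and the expected output follows from unit-length, counter-clockwise triangles:

```python
import numpy as np
import pyrr  # the snippet above relies on pyrr.vector3 and numpy

vertices = np.array([[1.0, 0.0, 0.0],
                     [0.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [1.0, 1.0, 0.0]])
# two counter-clockwise triangles forming a quad in the z=0 plane
index = np.array([[0, 2, 1],
                  [0, 3, 2]])

print(generate_vertex_normals(vertices, index))
# every vertex of the flat quad ends up with the normal [0., 0., 1.]
```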
Is there already such functionality in the library that I may have missed? Is there an intention to add such a utility function? | adamlwgriffiths/Pyrr | diff --git a/pyrr/tests/test_vector3.py b/pyrr/tests/test_vector3.py
index cc64312..a51d489 100644
--- a/pyrr/tests/test_vector3.py
+++ b/pyrr/tests/test_vector3.py
@@ -102,6 +102,85 @@ class test_vector3(unittest.TestCase):
]
np.testing.assert_almost_equal(result, expected, decimal=5)
+ def test_generate_normals(self):
+ vertices = np.array([
+ [2.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [0.0, 2.0, 0.0],
+ [2.0, 2.0, 0.0]
+ ])
+ index = np.array([
+ [0, 2, 1],
+ [0, 3, 2],
+ ])
+ v1, v2, v3 = np.rollaxis(vertices[index], axis=1)
+ result = vector3.generate_normals(v1, v2, v3)
+ expected = np.array([
+ [0., 0., 1.],
+ [0., 0., 1.]
+ ])
+ np.testing.assert_array_equal(result, expected)
+
+ def test_generate_normals_unnormalized(self):
+ vertices = np.array([
+ [2.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [0.0, 2.0, 0.0],
+ [2.0, 2.0, 0.0]
+ ])
+ index = np.array([
+ [0, 2, 1],
+ [0, 3, 2],
+ ])
+ v1, v2, v3 = np.rollaxis(vertices[index], axis=1)
+ result = vector3.generate_normals(v1, v2, v3, normalize_result=False)
+ expected = np.array([
+ [0., 0., 4.],
+ [0., 0., 4.]
+ ])
+ np.testing.assert_array_equal(result, expected)
+
+ def test_generate_vertex_normals(self):
+ vertices = np.array([
+ [1.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0],
+ [1.0, 1.0, 0.0]
+ ])
+ index = np.array([
+ [0, 2, 1],
+ [0, 3, 2],
+ ])
+ result = vector3.generate_vertex_normals(vertices, index)
+ expected = np.array([
+ [0., 0., 1.],
+ [0., 0., 1.],
+ [0., 0., 1.],
+ [0., 0., 1.]
+ ])
+ np.testing.assert_array_equal(result, expected)
+
+ def test_generate_vertex_normals_unnormalized(self):
+ vertices = np.array([
+ [1.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0],
+ [1.0, 1.0, 0.0]
+ ])
+ index = np.array([
+ [0, 2, 1],
+ [0, 3, 2],
+ ])
+ result = vector3.generate_vertex_normals(
+ vertices, index, normalize_result=False)
+ expected = np.array([
+ [0., 0., 2.],
+ [0., 0., 1.],
+ [0., 0., 2.],
+ [0., 0., 1.]
+ ])
+ np.testing.assert_array_equal(result, expected)
+
def test_squared_length_single_vector(self):
result = vector3.squared_length([1.,1.,1.])
np.testing.assert_almost_equal(result, 3., decimal=5)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
multipledispatch==1.0.0
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/adamlwgriffiths/Pyrr.git@ae9903fa3365d99fae588779df9e13d23fc3fc7d#egg=pyrr
pytest==8.3.5
tomli==2.2.1
| name: Pyrr
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- multipledispatch==1.0.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/Pyrr
| [
"pyrr/tests/test_vector3.py::test_vector3::test_generate_normals",
"pyrr/tests/test_vector3.py::test_vector3::test_generate_vertex_normals",
"pyrr/tests/test_vector3.py::test_vector3::test_generate_vertex_normals_unnormalized"
]
| [
"pyrr/tests/test_vector3.py::test_vector3::test_create",
"pyrr/tests/test_vector3.py::test_vector3::test_create_from_matrix44_translation",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_x",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_y",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_z"
]
| [
"pyrr/tests/test_vector3.py::test_vector3::test_create_from_matrix44_translation_dtype_matches",
"pyrr/tests/test_vector3.py::test_vector3::test_create_from_vector4",
"pyrr/tests/test_vector3.py::test_vector3::test_create_list",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_x_dtype",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_y_dtype",
"pyrr/tests/test_vector3.py::test_vector3::test_create_unit_length_z_dtype",
"pyrr/tests/test_vector3.py::test_vector3::test_create_values",
"pyrr/tests/test_vector3.py::test_vector3::test_cross_batch",
"pyrr/tests/test_vector3.py::test_vector3::test_cross_coincident",
"pyrr/tests/test_vector3.py::test_vector3::test_cross_single_vector",
"pyrr/tests/test_vector3.py::test_vector3::test_dot_adjacent",
"pyrr/tests/test_vector3.py::test_vector3::test_dot_angle",
"pyrr/tests/test_vector3.py::test_vector3::test_dot_batch",
"pyrr/tests/test_vector3.py::test_vector3::test_dot_parallel",
"pyrr/tests/test_vector3.py::test_vector3::test_generate_normals_unnormalized",
"pyrr/tests/test_vector3.py::test_vector3::test_import",
"pyrr/tests/test_vector3.py::test_vector3::test_interoplation",
"pyrr/tests/test_vector3.py::test_vector3::test_length",
"pyrr/tests/test_vector3.py::test_vector3::test_length_batch",
"pyrr/tests/test_vector3.py::test_vector3::test_normalize_batch",
"pyrr/tests/test_vector3.py::test_vector3::test_normalize_single_vector",
"pyrr/tests/test_vector3.py::test_vector3::test_set_length",
"pyrr/tests/test_vector3.py::test_vector3::test_set_length_batch_vector",
"pyrr/tests/test_vector3.py::test_vector3::test_squared_length_batch",
"pyrr/tests/test_vector3.py::test_vector3::test_squared_length_single_vector"
]
| []
| BSD License | 1,456 | [
"docs/source/info_contributing.rst",
"README.md",
"pyrr/vector3.py"
]
| [
"docs/source/info_contributing.rst",
"README.md",
"pyrr/vector3.py"
]
|
|
openmrslab__suspect-77 | 153f01f32f025eedfaefc655fb14621d087dd113 | 2017-07-13 15:11:30 | 820e897294d90e08c4b91be7289e4ee9ebc6d009 | coveralls:
[](https://coveralls.io/builds/12379758)
Coverage increased (+0.3%) to 79.735% when pulling **cce3e373e9c39fbcdc2a185f9597c06d4a51f87a on 76_autophasing** into **153f01f32f025eedfaefc655fb14621d087dd113 on master**.
| diff --git a/suspect/basis/__init__.py b/suspect/basis/__init__.py
index e0296f9..257e640 100644
--- a/suspect/basis/__init__.py
+++ b/suspect/basis/__init__.py
@@ -1,7 +1,9 @@
import numpy
+from ..mrsobjects import MRSData
-def gaussian(time_axis, frequency, phase, fwhm):
+def gaussian(time_axis, frequency, phase, fwhm, f0=123.0):
+ dt = time_axis[1] - time_axis[0]
oscillatory_term = numpy.exp(2j * numpy.pi * (frequency * time_axis) + 1j * phase)
damping = numpy.exp(-time_axis ** 2 / 4 * numpy.pi ** 2 / numpy.log(2) * fwhm ** 2)
fid = oscillatory_term * damping
@@ -13,10 +15,11 @@ def gaussian(time_axis, frequency, phase, fwhm):
# the chosen df does not affect the area, so we divide by df, which is
# equivalent to multiplying by dt * np, then the np terms cancel and we
# are left with the dt term (and a 2 because fid[0] = 0.5, not 1)
- return fid * (time_axis[1] - time_axis[0]) * 2.0
+ fid = fid * dt * 2.0
+ return MRSData(fid, dt, f0)
-def lorentzian(time_axis, frequency, phase, fwhm):
+def lorentzian(time_axis, frequency, phase, fwhm, f0=123.0):
oscillatory_term = numpy.exp(1j * (2 * numpy.pi * frequency * time_axis + phase))
damping = numpy.exp(-time_axis * numpy.pi * fwhm)
fid = oscillatory_term * damping
diff --git a/suspect/processing/__init__.py b/suspect/processing/__init__.py
index 4944019..68d8c93 100644
--- a/suspect/processing/__init__.py
+++ b/suspect/processing/__init__.py
@@ -1,2 +1,2 @@
-from . import frequency_correction, channel_combination, denoising, water_suppression
+from . import frequency_correction, channel_combination, denoising, water_suppression, phase
from suspect.processing._apodize import *
\ No newline at end of file
diff --git a/suspect/processing/phase.py b/suspect/processing/phase.py
new file mode 100644
index 0000000..acfe9dd
--- /dev/null
+++ b/suspect/processing/phase.py
@@ -0,0 +1,161 @@
+import lmfit
+import numpy as np
+
+
+def mag_real(data, *args, range_hz=None, range_ppm=None):
+ """
+ Estimates the zero and first order phase parameters which minimise the
+ difference between the real part of the spectrum and the magnitude. Note
+ that these are the phase correction terms, designed to be used directly
+ in the adjust_phase() function without negation.
+
+ Parameters
+ ----------
+ data: MRSBase
+ The data to be phased
+ range_hz: tuple (low, high)
+ The frequency range in Hertz over which to compare the spectra
+ range_ppm: tuple (low, high)
+ The frequency range in PPM over which to compare the spectra. range_hz
+ and range_ppm cannot both be defined.
+ Returns
+ -------
+ phi0 : float
+ The estimated zero order phase correction
+ phi1 : float
+ The estimated first order phase correction
+ """
+ if range_hz is not None and range_ppm is not None:
+ raise KeyError("Cannot specify both range_hz and range_ppm")
+
+ if range_hz is not None:
+ frequency_slice = data.slice_hz(*range_hz)
+ elif range_hz is not None:
+ frequency_slice = data.slice_ppm(*range_ppm)
+ else:
+ frequency_slice = slice(0, data.np)
+
+ def single_spectrum_version(spectrum):
+ def residual(pars):
+ par_vals = pars.valuesdict()
+ phased_data = spectrum.adjust_phase(par_vals['phi0'],)
+ #par_vals['phi1'])
+
+ diff = np.real(phased_data) - np.abs(spectrum)
+
+ return diff[frequency_slice]
+
+ params = lmfit.Parameters()
+ params.add('phi0', value=0, min=-np.pi, max=np.pi)
+ #params.add('phi1', value=0.0, min=-0.01, max=0.25)
+
+ result = lmfit.minimize(residual, params)
+ #return result.params['phi0'].value, result.params['phi1'].value
+ return result.params['phi0'].value, 0
+
+ return np.apply_along_axis(single_spectrum_version,
+ axis=-1,
+ arr=data.spectrum())
+
+
+def ernst(data):
+ """
+ Estimates the zero and first order phase using the ACME algorithm, which
+ minimises the integral of the imaginary part of the spectrum. Note that
+ these are the phase correction terms, designed to be used directly in the
+ adjust_phase() function without negation.
+
+ Parameters
+ ----------
+ data: MRSBase
+ The data to be phased
+ range_hz: tuple (low, high)
+ The frequency range in Hertz over which to compare the spectra
+ range_ppm: tuple (low, high)
+ The frequency range in PPM over which to compare the spectra. range_hz
+ and range_ppm cannot both be defined.
+ Returns
+ -------
+ phi0 : float
+ The estimated zero order phase correction
+ phi1 : float
+ The estimated first order phase correction
+ """
+ def residual(pars):
+ par_vals = pars.valuesdict()
+ phased_data = data.adjust_phase(par_vals['phi0'],
+ par_vals['phi1'])
+ return np.sum(phased_data.spectrum().imag)
+
+ params = lmfit.Parameters()
+ params.add('phi0', value=0, min=-np.pi, max=np.pi)
+ params.add('phi1', value=0.0, min=-0.005, max=0.1)
+
+ result = lmfit.minimize(residual, params, method='simplex')
+ return result.params['phi0'].value, result.params['phi1'].value
+
+
+def acme(data, *args, range_hz=None, range_ppm=None):
+ """
+ Estimates the zero and first order phase using the ACME algorithm, which
+ minimises the entropy of the real part of the spectrum. Note that these
+ are the phase correction terms, designed to be used directly in the
+ adjust_phase() function without negation.
+
+ Parameters
+ ----------
+ data: MRSBase
+ The data to be phased
+ range_hz: tuple (low, high)
+ The frequency range in Hertz over which to compare the spectra
+ range_ppm: tuple (low, high)
+ The frequency range in PPM over which to compare the spectra. range_hz
+ and range_ppm cannot both be defined.
+ Returns
+ -------
+ phi0 : float
+ The estimated zero order phase correction
+ phi1 : float
+ The estimated first order phase correction
+ """
+ if range_hz is not None and range_ppm is not None:
+ raise KeyError("Cannot specify both range_hz and range_ppm")
+
+ if range_hz is not None:
+ frequency_slice = data.slice_hz(*range_hz)
+ elif range_hz is not None:
+ frequency_slice = data.slice_ppm(*range_ppm)
+ else:
+ frequency_slice = slice(0, data.np)
+
+ def single_spectrum_version(spectrum):
+ def residual(pars):
+ par_vals = pars.valuesdict()
+ phased_data = spectrum.adjust_phase(par_vals['phi0'],
+ par_vals['phi1'])
+
+ r = phased_data.real[frequency_slice]
+ derivative = np.abs((r[1:] - r[:-1]))
+ derivative_norm = derivative / np.sum(derivative)
+
+ # make sure the entropy doesn't blow up by removing 0 values
+ derivative_norm[derivative_norm == 0] = 1
+
+ entropy = -np.sum(derivative_norm * np.log(derivative_norm))
+
+ # penalty function
+ p = np.sum(r[r < 0] ** 2)
+ gamma = 1000
+
+ return entropy + gamma * p
+
+ params = lmfit.Parameters()
+ params.add('phi0', value=0.0, min=-np.pi, max=np.pi)
+ params.add('phi1', value=0.0, min=-0.005, max=0.01)
+
+ result = lmfit.minimize(residual, params, method='simplex')
+ return result.params['phi0'].value, result.params['phi1'].value
+
+ return np.apply_along_axis(single_spectrum_version,
+ -1,
+ data.spectrum())
| ENH: auto-phasing
Add some algorithms for automatic phase estimation. Good first candidate is mag/real comparison, could also look at Ernst (minimise integral of imaginary component). | openmrslab/suspect | diff --git a/tests/test_mrs/test_processing/test_phasing.py b/tests/test_mrs/test_processing/test_phasing.py
new file mode 100644
index 0000000..b7ae7da
--- /dev/null
+++ b/tests/test_mrs/test_processing/test_phasing.py
@@ -0,0 +1,61 @@
+import suspect
+import numpy as np
+
+
+def test_mag_real_zero():
+ time_axis = np.arange(0, 1.024, 2.5e-4)
+ sample_data = (6 * suspect.basis.gaussian(time_axis, 0, 0.0, 12)
+ + suspect.basis.gaussian(time_axis, 250, 0.0, 12)
+ + suspect.basis.gaussian(time_axis, 500, 0.0, 12))
+ sample_data = sample_data.adjust_phase(0.2, 0)
+ sample_data += np.random.rand(len(sample_data)) * 1e-6
+
+ phi0, phi1 = suspect.processing.phase.mag_real(sample_data)
+
+ np.testing.assert_allclose(phi0, -0.2, rtol=0.05)
+
+
+def test_acme_zero():
+ time_axis = np.arange(0, 1.024, 2.5e-4)
+ sample_data = (6 * suspect.basis.gaussian(time_axis, 0, 0.0, 12)
+ + suspect.basis.gaussian(time_axis, 50, 0.0, 12)
+ + suspect.basis.gaussian(time_axis, 200, 0.0, 12))
+ sample_data = sample_data.adjust_phase(0.2, 0)
+ sample_data += np.random.rand(len(sample_data)) * 1e-6
+
+ phi0, phi1 = suspect.processing.phase.acme(sample_data)
+
+ np.testing.assert_allclose(phi0, -0.2, rtol=0.05)
+
+
+def test_acme_first():
+ time_axis = np.arange(0, 1.024, 2.5e-4)
+ sample_data = (6 * suspect.basis.gaussian(time_axis, 0, 0.0, 6)
+ + suspect.basis.gaussian(time_axis, 150, 0.0, 6))
+ sample_data += np.random.rand(len(sample_data)) * 2e-6
+
+ in_0 = 0.5
+ in_1 = 0.001
+ sample_data = sample_data.adjust_phase(in_0, in_1)
+
+ out_0, out_1 = suspect.processing.phase.acme(sample_data)
+
+ np.testing.assert_allclose(in_0, -out_0, rtol=0.05)
+ np.testing.assert_allclose(in_1, -out_1, rtol=0.05)
+
+
+def test_acme_range_hz():
+ time_axis = np.arange(0, 1.024, 2.5e-4)
+ sample_data = (6 * suspect.basis.gaussian(time_axis, 0, 0.0, 12)
+ + suspect.basis.gaussian(time_axis, 50, 0.0, 12)
+ - suspect.basis.gaussian(time_axis, 200, 0.0, 12))
+ sample_data += np.random.rand(len(sample_data)) * 1e-7
+
+ in_0 = 0.2
+ in_1 = 0.001
+ sample_data = sample_data.adjust_phase(in_0, in_1)
+
+ out_0, out_1 = suspect.processing.phase.acme(sample_data, range_hz=(-1000, 75))
+
+ np.testing.assert_allclose(in_0, -out_0, rtol=0.05)
+ np.testing.assert_allclose(in_1, -out_1, rtol=0.05)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
asteval==0.9.26
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
lmfit==1.0.3
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.8.8
nest-asyncio==1.6.0
nibabel==3.2.2
numpy==1.19.5
packaging==21.3
pandocfilters==1.5.1
parse==1.20.2
Parsley==1.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pydicom==2.3.1
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.1.1
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/openmrslab/suspect.git@153f01f32f025eedfaefc655fb14621d087dd113#egg=suspect
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
uncertainties==3.1.7
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: suspect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- asteval==0.9.26
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- lmfit==1.0.3
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.8.8
- nest-asyncio==1.6.0
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pandocfilters==1.5.1
- parse==1.20.2
- parsley==1.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pydicom==2.3.1
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.1.1
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- uncertainties==3.1.7
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/suspect
| [
"tests/test_mrs/test_processing/test_phasing.py::test_mag_real_zero",
"tests/test_mrs/test_processing/test_phasing.py::test_acme_zero",
"tests/test_mrs/test_processing/test_phasing.py::test_acme_first",
"tests/test_mrs/test_processing/test_phasing.py::test_acme_range_hz"
]
| []
| []
| []
| MIT License | 1,457 | [
"suspect/processing/phase.py",
"suspect/basis/__init__.py",
"suspect/processing/__init__.py"
]
| [
"suspect/processing/phase.py",
"suspect/basis/__init__.py",
"suspect/processing/__init__.py"
]
|
mapbox__mapbox-sdk-py-203 | e95e63b401eca29004aeea72f7a99592ea591215 | 2017-07-13 22:48:36 | a823d77661d0f7a2209d47dd14e5195fc8549e67 | diff --git a/CHANGES b/CHANGES
index 2770b78..d7b48d9 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,10 @@
+Changes
+=======
+
+Next
+----
+- Added support for Directions v5.
+
0.15.1 (2018-01-16)
-------------------
- Restore the valid_profiles attribute needed by the Mapbox CLI's distance
diff --git a/docs/geocoding.md b/docs/geocoding.md
index 2557e44..919dd2c 100644
--- a/docs/geocoding.md
+++ b/docs/geocoding.md
@@ -168,7 +168,7 @@ Places at a longitude, latitude point may be found using `Geocoder.reverse()`.
>>> for f in features:
... print('{place_name}: {id}'.format(**f))
10003... postcode...
-120 East 13 Street, Manhattan, New York, New York 10003... address...
+120 East 13th Street, Manhattan, New York, New York 10003... address...
Greenwich Village... neighborhood...
Manhattan... locality...
New York, New York... place...
diff --git a/mapbox/encoding.py b/mapbox/encoding.py
index 4aabe9f..a6e6f96 100644
--- a/mapbox/encoding.py
+++ b/mapbox/encoding.py
@@ -66,11 +66,11 @@ def encode_waypoints(features, min_limit=None, max_limit=None, precision=6):
if min_limit is not None and len(coords) < min_limit:
raise InvalidFeatureError(
- "Not enough features to encode waypoints, "
+ "Not enough features to encode coordinates, "
"need at least {0}".format(min_limit))
if max_limit is not None and len(coords) > max_limit:
raise InvalidFeatureError(
- "Too many features to encode waypoints, "
+ "Too many features to encode coordinates, "
"need at most {0}".format(max_limit))
return ';'.join(coords)
diff --git a/mapbox/services/directions.py b/mapbox/services/directions.py
index 1c7318b..8df3400 100644
--- a/mapbox/services/directions.py
+++ b/mapbox/services/directions.py
@@ -1,124 +1,272 @@
+import warnings
+from numbers import Number
+
+import polyline
from uritemplate import URITemplate
-from mapbox.encoding import encode_waypoints
+from mapbox.encoding import encode_waypoints as encode_coordinates
from mapbox.services.base import Service
from mapbox import errors
class Directions(Service):
- """Access to the Directions API V4"""
+ """Access to the Directions v5 API."""
api_name = 'directions'
- api_version = 'v4'
+ api_version = 'v5'
- valid_profiles = ['mapbox.driving',
- 'mapbox.cycling',
- 'mapbox.walking']
- valid_instruction_formats = ['text', 'html']
- valid_geom_encoding = ['geojson', 'polyline', 'false']
+ valid_profiles = [
+ 'mapbox/driving',
+ 'mapbox/driving-traffic',
+ 'mapbox/walking',
+ 'mapbox/cycling']
+ valid_geom_encoding = ['geojson', 'polyline', 'polyline6']
+ valid_geom_overview = ['full', 'simplified', False]
+ valid_annotations = ['duration', 'distance', 'speed']
@property
def baseuri(self):
- return 'https://{0}/{2}/{1}'.format(
+ return 'https://{0}/{1}/{2}'.format(
self.host, self.api_name, self.api_version)
def _validate_profile(self, profile):
+ # Backwards compatible with v4 profiles
+ v4_to_v5_profiles = {
+ 'mapbox.driving': 'mapbox/driving',
+ 'mapbox.cycling': 'mapbox/cycling',
+ 'mapbox.walking': 'mapbox/walking'}
+ if profile in v4_to_v5_profiles:
+ profile = v4_to_v5_profiles[profile]
+ warnings.warn('Converting v4 profile to v5, use {} instead'.format(profile),
+ errors.MapboxDeprecationWarning)
if profile not in self.valid_profiles:
raise errors.InvalidProfileError(
"{0} is not a valid profile".format(profile))
return profile
+ def _validate_annotations(self, annotations):
+ results = []
+ if annotations is None:
+ return None
+ for annotation in annotations:
+ if annotation not in self.valid_annotations:
+ raise errors.InvalidParameterError(
+ "{0} is not a valid annotation".format(annotation))
+ else:
+ results.append(annotation)
+ return results
+
def _validate_geom_encoding(self, geom_encoding):
if geom_encoding is not None and \
geom_encoding not in self.valid_geom_encoding:
raise errors.InvalidParameterError(
- "{0} is not a valid geometry encoding".format(geom_encoding))
+ "{0} is not a valid geometry format".format(geom_encoding))
return geom_encoding
- def _validate_instruction_format(self, instruction_format):
- if instruction_format is not None and \
- instruction_format not in self.valid_instruction_formats:
+ def _validate_geom_overview(self, overview):
+ if overview is not None and overview not in self.valid_geom_overview:
raise errors.InvalidParameterError(
- "{0} is not a valid instruction format".format(
- instruction_format))
- return instruction_format
+ "{0} is not a valid geometry overview type".format(overview))
+ return overview
- def directions(self, features, profile='mapbox.driving', alternatives=None,
- instructions=None, geometry=None, steps=None):
- """Request directions for waypoints encoded as GeoJSON features.
+ def _validate_snapping(self, snaps, features):
+ bearings = []
+ radii = []
+ if snaps is None:
+ return (None, None)
+ if len(snaps) != len(features):
+ raise errors.InvalidParameterError(
+ 'Must provide exactly one snapping element for each input feature')
+ for snap in snaps:
+ if snap is None:
+ bearings.append(None)
+ radii.append(None)
+ else:
+ try:
+ # radius-only
+ radius = self._validate_radius(snap)
+ bearing = None
+ except errors.InvalidParameterError:
+ # (radius, angle, range) tuple
+ try:
+ radius, angle, rng = snap
+ except ValueError:
+ raise errors.InvalidParameterError(
+ 'waypoint snapping should contain 3 elements: '
+ '(bearing, angle, range)')
+ self._validate_radius(radius)
+
+ try:
+ assert angle >= 0
+ assert angle <= 360
+ assert rng >= 0
+ assert rng <= 360
+ except (TypeError, AssertionError):
+ raise errors.InvalidParameterError(
+ 'angle and range must be between 0 and 360')
+ bearing = (angle, rng)
+
+ bearings.append(bearing)
+ radii.append(radius)
- Parameters
- ----------
- features : list
- List of GeoJSON features.
- profile : str
- Name of a Mapbox profile such as 'mapbox.driving'.
- alternatives : bool
+ if all([b is None for b in bearings]):
+ bearings = None
- instructions : str
+ return (bearings, radii)
- geometry : str
+ def _validate_radius(self, radius):
+ if radius is None:
+ return None
+
+ if isinstance(radius, str):
+ if radius != 'unlimited':
+ raise errors.InvalidParameterError(
+ '{0} is not a valid radius'.format(radius))
+ elif isinstance(radius, Number):
+ if radius <= 0:
+ raise errors.InvalidParameterError(
+ 'radius must be greater than zero'.format(radius))
+ else:
+ raise errors.InvalidParameterError(
+ '{0} is not a valid radius'.format(radius))
+
+ return radius
+
+ @staticmethod
+ def _encode_bearing(b):
+ if b is None:
+ return ''
+ else:
+ return '{},{}'.format(*b)
+
+ def directions(self, features, profile='mapbox/driving',
+ alternatives=None, geometries=None, overview=None, steps=None,
+ continue_straight=None, waypoint_snapping=None, annotations=None,
+ language=None, **kwargs):
+ """Request directions for waypoints encoded as GeoJSON features.
- steps : bool
+ Parameters
+ ----------
+ features : iterable
+ An collection of GeoJSON features
+ profile : str
+ Name of a Mapbox profile such as 'mapbox.driving'
+ alternatives : bool
+ Whether to try to return alternative routes, default: False
+ geometries : string
+ Type of geometry returned (geojson, polyline, polyline6)
+ overview : string or False
+ Type of returned overview geometry: 'full', 'simplified',
+ or False
+ steps : bool
+ Whether to return steps and turn-by-turn instructions,
+ default: False
+ continue_straight : bool
+ Direction of travel when departing intermediate waypoints
+ radiuses : iterable of numbers or 'unlimited'
+ Must be same length as features
+ waypoint_snapping : list
+ Controls snapping of waypoints
+ The list is zipped with the features collection and must
+ have the same length. Elements of the list must be one of:
- Returns
- -------
- response
- It returns a response object with a geojson() method for accessing the route(s) as a GeoJSON-like FeatureCollection dictionary.
+ - A number (interpretted as a snapping radius)
+ - The string 'unlimited' (unlimited snapping radius)
+ - A 3-element tuple consisting of (radius, angle, range)
+ - None (no snapping parameters specified for that waypoint)
+ annotations : str
+ Whether or not to return additional metadata along the route
+
+ Possible values are: 'duration', 'distance', 'speed', and
+ 'congestion'. Several annotations can be used by joining
+ them with ','.
+ language : str
+ Language of returned turn-by-turn text instructions,
+ default: 'en'
+
+ Returns
+ -------
+ requests.Response
+ The response object has a geojson() method for access to
+ the route(s) as a GeoJSON-like FeatureCollection
+ dictionary.
"""
+ # backwards compatible, deprecated
+ if 'geometry' in kwargs and geometries is None:
+ geometries = kwargs['geometry']
+ warnings.warn('Use `geometries` instead of `geometry`',
+ errors.MapboxDeprecationWarning)
+
+ annotations = self._validate_annotations(annotations)
+ coordinates = encode_coordinates(
+ features, precision=6, min_limit=2, max_limit=25)
+ geometries = self._validate_geom_encoding(geometries)
+ overview = self._validate_geom_overview(overview)
profile = self._validate_profile(profile)
- instructions = self._validate_instruction_format(instructions)
- geometry = self._validate_geom_encoding(geometry)
- waypoints = encode_waypoints(features, precision=6,
- min_limit=2, max_limit=30)
+
+ bearings, radii = self._validate_snapping(waypoint_snapping, features)
params = {}
if alternatives is not None:
params.update(
{'alternatives': 'true' if alternatives is True else 'false'})
- if instructions is not None:
- params.update({'instructions': instructions})
- if geometry is not None:
+ if geometries is not None:
+ params.update({'geometries': geometries})
+ if overview is not None:
params.update(
- {'geometry': 'false' if geometry is False else geometry})
+ {'overview': 'false' if overview is False else overview})
if steps is not None:
params.update(
{'steps': 'true' if steps is True else 'false'})
+ if continue_straight is not None:
+ params.update(
+ {'continue_straight': 'true' if steps is True else 'false'})
+ if annotations is not None:
+ params.update({'annotations': ','.join(annotations)})
+ if language is not None:
+ params.update({'language': language})
+ if radii is not None:
+ params.update(
+ {'radiuses': ';'.join(str(r) for r in radii)})
+ if bearings is not None:
+ params.update(
+ {'bearings': ';'.join(self._encode_bearing(b) for b in bearings)})
+
+ profile_ns, profile_name = profile.split('/')
- uri = URITemplate(self.baseuri + '/{profile}/{waypoints}.json').expand(
- profile=profile, waypoints=waypoints)
+ uri = URITemplate(
+ self.baseuri + '/{profile_ns}/{profile_name}/{coordinates}.json').expand(
+ profile_ns=profile_ns, profile_name=profile_name, coordinates=coordinates)
resp = self.session.get(uri, params=params)
self.handle_http_error(resp)
def geojson():
- return self._geojson(resp.json())
-
+ return self._geojson(resp.json(), geom_format=geometries)
resp.geojson = geojson
return resp
- def _geojson(self, data):
+ def _geojson(self, data, geom_format=None):
fc = {
'type': 'FeatureCollection',
'features': []}
for route in data['routes']:
+ if geom_format == 'geojson':
+ geom = route['geometry']
+ else:
+ # convert default polyline encoded geometry
+ geom = {
+ 'type': 'LineString',
+ 'coodinates': polyline.decode(route['geometry'])}
feature = {
'type': 'Feature',
+ 'geometry': geom,
'properties': {
- # TODO handle these nested structures
- # Flatten or ???
- # 'destination': data['destination'],
- # 'origin': data['origin'],
- # 'waypoints': data['waypoints'],
- # 'steps': route['steps']
+ # TODO include RouteLegs and other details
'distance': route['distance'],
- 'duration': route['duration'],
- 'summary': route['summary']}}
-
- feature['geometry'] = route['geometry']
+ 'duration': route['duration']}}
fc['features'].append(feature)
-
return fc
| Directions API update
@perrygeo let's coordinate a new release to go along with the next Directions API update.
| mapbox/mapbox-sdk-py | diff --git a/tests/moors.json b/tests/moors.json
index 4bcb54c..23f3ab5 100644
--- a/tests/moors.json
+++ b/tests/moors.json
@@ -1,1 +1,1 @@
-{"origin":{"type":"Feature","geometry":{"type":"Point","coordinates":[-87.33773803710938,36.53900909423828]},"properties":{"name":"US 79"}},"destination":{"type":"Feature","geometry":{"type":"Point","coordinates":[-88.24732208251953,36.92213821411133]},"properties":{"name":"Moors Camp Highway"}},"waypoints":[],"routes":[{"distance":146474,"duration":5951,"summary":"I 24 - I 24;I 69","geometry":{"type":"LineString","coordinates":[[-87.337733,36.539006],[-87.337802,36.538969],[-87.337908,36.53893],[-87.338082,36.538892],[-87.338282,36.53888],[-87.337889,36.538796],[-87.337673,36.53876],[-87.337557,36.53874],[-87.337415,36.539455],[-87.337364,36.53971],[-87.337159,36.540704],[-87.33695,36.541804],[-87.336924,36.54191],[-87.33686,36.5421],[-87.336821,36.542193],[-87.336728,36.542421],[-87.336614,36.542681],[-87.336504,36.542916],[-87.336403,36.543137],[-87.336309,36.543349],[-87.336166,36.54363],[-87.336058,36.543863],[-87.335957,36.544078],[-87.335845,36.544316],[-87.33572,36.544583],[-87.335557,36.544932],[-87.335361,36.545309],[-87.335225,36.545538],[-87.335073,36.545729],[-87.334763,36.546061],[-87.334364,36.546408],[-87.334209,36.54661],[-87.332988,36.547578],[-87.332321,36.548082],[-87.328617,36.550923],[-87.326853,36.55227],[-87.325047,36.553631],[-87.324423,36.554084],[-87.323926,36.554453],[-87.322803,36.555285],[-87.320678,36.556868],[-87.31978,36.557531],[-87.318274,36.558642],[-87.318105,36.558768],[-87.316189,36.560188],[-87.315139,36.560971],[-87.315084,36.561016],[-87.314177,36.561759],[-87.313798,36.562066],[-87.313517,36.562289],[-87.313263,36.562481],[-87.312928,36.562732],[-87.312644,36.562937],[-87.31255,36.563005],[-87.312445,36.563083],[-87.312151,36.563301],[-87.311789,36.563567],[-87.31108,36.564092],[-87.310988,36.564161],[-87.310586,36.564459],[-87.310376,36.564614],[-87.309622,36.565174],[-87.308562,36.565963],[-87.30807,36.566321],[-87.307566,36.5667],[-87.306917,36.567173],[-87.306196,36.567707],[-87.305954,36.567899],[-87.305753,36.568068],[-87.305559,36.568235],[-87.305297,36.568486],[-87.305069,36.568723],[-87.304954,36.568849],[-87.304759,36.569084],[-87.304669,36.569206],[-87.304557,36.569357],[-87.304437,36.569525],[-87.304305,36.569709],[-87.304169,36.569959],[-87.303978,36.570331],[-87.30387,36.570571],[-87.303794,36.570752],[-87.30368,36.571082],[-87.303615,36.571294],[-87.303583,36.571404],[-87.303417,36.571888],[-87.303355,36.572179],[-87.303289,36.572388],[-87.303226,36.572591],[-87.303144,36.572865],[-87.302999,36.573324],[-87.302966,36.573419],[-87.302917,36.573564],[-87.302889,36.573651],[-87.302807,36.573891],[-87.302747,36.574064],[-87.302634,36.574354],[-87.302434,36.574822],[-87.302294,36.57512],[-87.302176,36.575372],[-87.302061,36.575592],[-87.301927,36.575845],[-87.301789,36.576079],[-87.301679,36.576265],[-87.301602,36.576398],[-87.301533,36.576516],[-87.301325,36.576844],[-87.301164,36.57709],[-87.300922,36.577429],[-87.300694,36.577734],[-87.300029,36.578604],[-87.299817,36.578881],[-87.299396,36.579439],[-87.299102,36.57983],[-87.298972,36.580005],[-87.298507,36.580603],[-87.29787,36.581443],[-87.297718,36.581643],[-87.29747,36.58196],[-87.29735,36.582113],[-87.297253,36.582247],[-87.296998,36.582589],[-87.296878,36.58275],[-87.296603,36.583106],[-87.296056,36.583857],[-87.295875,36.584128],[-87.295677,36.584441],[-87.295372,36.584951],[-87.295228,36.585213],[-87.294995,36.585625],[-87.294897,36.585803],[-87.294832,36.585934],[-87.294681,36.586204],[-87.294618,36.58632],[-87.294503,36.586534],[-87.294315,36.58687],[-87.294115,36.587233],[-
87.293983,36.587471],[-87.293749,36.587889],[-87.293679,36.588012],[-87.293596,36.588165],[-87.293371,36.58853],[-87.293263,36.588693],[-87.293087,36.588959],[-87.292949,36.589154],[-87.292563,36.58968],[-87.292448,36.589825],[-87.292294,36.590015],[-87.292182,36.590147],[-87.292017,36.590345],[-87.291709,36.5907],[-87.291356,36.591072],[-87.290844,36.591627],[-87.290594,36.591889],[-87.290332,36.592173],[-87.290031,36.59249],[-87.289815,36.592725],[-87.289361,36.593216],[-87.288309,36.594342],[-87.287669,36.595015],[-87.287584,36.595105],[-87.287213,36.595517],[-87.286945,36.595798],[-87.286737,36.596022],[-87.286547,36.596225],[-87.286259,36.596529],[-87.286071,36.596721],[-87.285717,36.59707],[-87.285576,36.597203],[-87.285461,36.597319],[-87.28513,36.597646],[-87.28485,36.597797],[-87.2844,36.598197],[-87.284251,36.59835],[-87.283992,36.598576],[-87.28153,36.600762],[-87.281286,36.600978],[-87.281068,36.601171],[-87.281183,36.601239],[-87.281191,36.60131],[-87.281228,36.60141],[-87.281279,36.601478],[-87.281408,36.601615],[-87.282635,36.602077],[-87.283768,36.602552],[-87.284369,36.602849],[-87.284841,36.603173],[-87.285551,36.603764],[-87.286131,36.604419],[-87.28667,36.605037],[-87.287836,36.606355],[-87.289403,36.608135],[-87.290573,36.609457],[-87.291581,36.610599],[-87.292736,36.611907],[-87.294102,36.613451],[-87.294562,36.613979],[-87.295617,36.615177],[-87.296791,36.616513],[-87.29811,36.617997],[-87.298982,36.618987],[-87.300245,36.620418],[-87.302278,36.622723],[-87.303158,36.623712],[-87.30356,36.624158],[-87.303885,36.624483],[-87.304142,36.624718],[-87.304488,36.625002],[-87.304824,36.625249],[-87.305215,36.625522],[-87.305663,36.625784],[-87.306139,36.626046],[-87.306615,36.626272],[-87.307474,36.626631],[-87.308961,36.627232],[-87.311507,36.628248],[-87.313463,36.629043],[-87.316378,36.63021],[-87.317496,36.630656],[-87.319528,36.631472],[-87.320636,36.631915],[-87.322124,36.632514],[-87.322912,36.63283],[-87.326746,36.634365],[-87.331147,36.636131],[-87.331833,36.636431],[-87.332633,36.636802],[-87.333274,36.637122],[-87.333949,36.637499],[-87.334537,36.637839],[-87.335119,36.638202],[-87.335998,36.63879],[-87.336655,36.639273],[-87.337296,36.63977],[-87.337772,36.640164],[-87.338606,36.640932],[-87.339251,36.641543],[-87.339702,36.641967],[-87.341759,36.643892],[-87.342212,36.644304],[-87.342547,36.644588],[-87.342855,36.644831],[-87.343112,36.64503],[-87.343337,36.645191],[-87.343836,36.645526],[-87.344227,36.645769],[-87.344588,36.645979],[-87.345011,36.646202],[-87.345502,36.646446],[-87.345956,36.646651],[-87.346628,36.646928],[-87.347315,36.647174],[-87.351457,36.648682],[-87.355412,36.650126],[-87.358047,36.651086],[-87.359198,36.65151],[-87.360299,36.651912],[-87.361184,36.652231],[-87.361537,36.652368],[-87.362136,36.652603],[-87.362517,36.652782],[-87.363075,36.653039],[-87.363567,36.653285],[-87.364006,36.653521],[-87.364369,36.653726],[-87.364754,36.653955],[-87.365059,36.654146],[-87.365546,36.654462],[-87.366332,36.655023],[-87.366888,36.655424],[-87.368545,36.656625],[-87.369431,36.657256],[-87.37017,36.657795],[-87.372173,36.659246],[-87.374249,36.660745],[-87.376123,36.662102],[-87.378443,36.663777],[-87.38052,36.665287],[-87.382593,36.666778],[-87.383424,36.667382],[-87.38421,36.667967],[-87.384845,36.668502],[-87.386221,36.669717],[-87.388164,36.67144],[-87.390006,36.673066],[-87.39192,36.674767],[-87.393591,36.676232],[-87.394044,36.6766],[-87.394506,36.676946],[-87.395113,36.677375],[-87.395722,36.677773],[-87.39614,36.678035],[-87.39648,36.678228],[
-87.397258,36.678645],[-87.397843,36.678931],[-87.399284,36.679603],[-87.400008,36.679942],[-87.401538,36.680648],[-87.403989,36.681789],[-87.406342,36.68288],[-87.410115,36.684642],[-87.411405,36.68524],[-87.413715,36.68631],[-87.416128,36.687429],[-87.418524,36.68854],[-87.41967,36.68908],[-87.42077,36.689579],[-87.421497,36.689908],[-87.422268,36.690243],[-87.42327,36.69066],[-87.424245,36.691062],[-87.427228,36.692217],[-87.429737,36.693181],[-87.432274,36.694158],[-87.435004,36.695211],[-87.437413,36.696135],[-87.439887,36.697091],[-87.442498,36.698097],[-87.44496,36.699037],[-87.447401,36.699977],[-87.448056,36.700237],[-87.448446,36.700383],[-87.452444,36.701973],[-87.454684,36.702788],[-87.455566,36.703132],[-87.459308,36.704564],[-87.461765,36.705511],[-87.462984,36.705983],[-87.464587,36.706593],[-87.46735,36.707659],[-87.467784,36.707826],[-87.472397,36.709599],[-87.474092,36.710255],[-87.474623,36.71046],[-87.475171,36.710691],[-87.475647,36.710905],[-87.476052,36.711107],[-87.476426,36.71129],[-87.476875,36.711527],[-87.477364,36.711803],[-87.477702,36.712009],[-87.478132,36.71229],[-87.478723,36.712688],[-87.479024,36.712912],[-87.479279,36.713107],[-87.479787,36.713506],[-87.480163,36.713824],[-87.480628,36.714251],[-87.481162,36.714765],[-87.481552,36.715198],[-87.482125,36.715836],[-87.482441,36.716242],[-87.482739,36.716647],[-87.483022,36.717062],[-87.483264,36.717434],[-87.483502,36.717829],[-87.483775,36.718343],[-87.485397,36.72137],[-87.486761,36.723958],[-87.487933,36.726167],[-87.488893,36.727989],[-87.489171,36.728506],[-87.489431,36.728992],[-87.48958,36.729241],[-87.489894,36.729733],[-87.490228,36.730215],[-87.490583,36.730685],[-87.490956,36.731141],[-87.491148,36.731364],[-87.491345,36.731584],[-87.491752,36.732014],[-87.492176,36.732436],[-87.492395,36.732643],[-87.49262,36.732846],[-87.492852,36.733045],[-87.493079,36.733239],[-87.493313,36.733431],[-87.49355,36.733619],[-87.49379,36.733803],[-87.494033,36.733983],[-87.49428,36.734159],[-87.494782,36.734499],[-87.495289,36.734824],[-87.498635,36.736801],[-87.502133,36.738859],[-87.506676,36.741522],[-87.508549,36.742634],[-87.517894,36.74813],[-87.52032,36.749559],[-87.528768,36.754512],[-87.529935,36.755201],[-87.54347,36.763165],[-87.544151,36.763565],[-87.558129,36.771787],[-87.574281,36.78127],[-87.578233,36.783586],[-87.583164,36.786482],[-87.583941,36.786931],[-87.584471,36.787221],[-87.585013,36.787502],[-87.585554,36.787765],[-87.585816,36.787884],[-87.586416,36.788157],[-87.586995,36.788403],[-87.587578,36.788636],[-87.59794,36.792665],[-87.598397,36.792846],[-87.598986,36.793087],[-87.599854,36.793472],[-87.600428,36.793747],[-87.600998,36.794035],[-87.601561,36.794339],[-87.602119,36.794656],[-87.602665,36.794983],[-87.603198,36.795321],[-87.603462,36.795494],[-87.603982,36.795852],[-87.60449,36.796221],[-87.604985,36.7966],[-87.605466,36.796984],[-87.614169,36.804276],[-87.61464,36.804665],[-87.615903,36.805722],[-87.617412,36.806985],[-87.620481,36.809549],[-87.62442,36.812849],[-87.624637,36.813022],[-87.624858,36.81319],[-87.625085,36.813348],[-87.625317,36.813498],[-87.625554,36.813639],[-87.625795,36.813774],[-87.626043,36.813901],[-87.626296,36.814021],[-87.626553,36.814133],[-87.626815,36.814236],[-87.62708,36.814331],[-87.627348,36.814418],[-87.627617,36.814496],[-87.627885,36.814568],[-87.628812,36.814793],[-87.630644,36.815233],[-87.633646,36.815935],[-87.634271,36.816081],[-87.638969,36.81719],[-87.639555,36.817338],[-87.640194,36.817522],[-87.640591,36.817648],[-87.640998,36.817798],[
-87.641204,36.817879],[-87.641409,36.817967],[-87.641613,36.81806],[-87.642026,36.818264],[-87.642236,36.818374],[-87.642664,36.818614],[-87.64653,36.820885],[-87.649584,36.822673],[-87.65748,36.827305],[-87.658762,36.828062],[-87.659008,36.828215],[-87.659252,36.828374],[-87.659493,36.828538],[-87.659542,36.828573],[-87.659969,36.828884],[-87.66005,36.828946],[-87.660204,36.829065],[-87.660669,36.829433],[-87.660888,36.829649],[-87.661107,36.829849],[-87.661312,36.830049],[-87.661527,36.83026],[-87.662335,36.831103],[-87.662972,36.831786],[-87.663953,36.832804],[-87.664583,36.833457],[-87.665035,36.833914],[-87.666882,36.835876],[-87.66728,36.83629],[-87.669066,36.838175],[-87.669872,36.83901],[-87.670282,36.839425],[-87.6707,36.839826],[-87.671342,36.840409],[-87.671773,36.840763],[-87.672163,36.841099],[-87.672773,36.841577],[-87.673207,36.841911],[-87.673456,36.842108],[-87.673859,36.842399],[-87.674279,36.842693],[-87.674718,36.842992],[-87.675414,36.843442],[-87.675655,36.843593],[-87.676153,36.843893],[-87.676669,36.844192],[-87.677201,36.844489],[-87.678007,36.844916],[-87.678845,36.845331],[-87.67971,36.845729],[-87.680292,36.845984],[-87.680586,36.846106],[-87.681178,36.846344],[-87.681778,36.846575],[-87.683899,36.847369],[-87.695248,36.851583],[-87.696399,36.852018],[-87.696682,36.85213],[-87.697242,36.852369],[-87.697796,36.852625],[-87.698021,36.852736],[-87.698606,36.853039],[-87.699132,36.853329],[-87.699642,36.853631],[-87.700137,36.853949],[-87.700624,36.854271],[-87.701081,36.854601],[-87.701525,36.854944],[-87.701954,36.855298],[-87.702165,36.855478],[-87.702574,36.855847],[-87.702756,36.85602],[-87.70297,36.856225],[-87.703347,36.856615],[-87.703709,36.857012],[-87.704054,36.857417],[-87.704381,36.857829],[-87.710946,36.866296],[-87.71205,36.867723],[-87.712254,36.867973],[-87.712557,36.868329],[-87.712753,36.868549],[-87.712922,36.86873],[-87.713309,36.869125],[-87.713506,36.869308],[-87.71391,36.869664],[-87.714331,36.870005],[-87.71455,36.870171],[-87.714776,36.870335],[-87.715248,36.870657],[-87.715493,36.870816],[-87.715744,36.870973],[-87.716,36.871124],[-87.716263,36.871271],[-87.71637,36.871329],[-87.717426,36.871898],[-87.717857,36.872134],[-87.718668,36.872566],[-87.722058,36.874394],[-87.725361,36.876163],[-87.726922,36.877012],[-87.727904,36.877575],[-87.728627,36.878007],[-87.729596,36.878602],[-87.73083,36.879372],[-87.732283,36.8803],[-87.734799,36.881857],[-87.735297,36.88216],[-87.739089,36.884579],[-87.745669,36.888727],[-87.747825,36.890079],[-87.749479,36.89112],[-87.749833,36.891332],[-87.750297,36.891608],[-87.750619,36.891787],[-87.752228,36.892656],[-87.753998,36.89361],[-87.756167,36.894785],[-87.756984,36.89523],[-87.762715,36.898307],[-87.763343,36.898649],[-87.763712,36.89886],[-87.764103,36.899098],[-87.764636,36.899439],[-87.764993,36.899686],[-87.765383,36.899963],[-87.765882,36.900348],[-87.766789,36.901064],[-87.770817,36.904255],[-87.772381,36.905495],[-87.773282,36.906198],[-87.773756,36.906548],[-87.774254,36.9069],[-87.77477,36.907245],[-87.775289,36.907573],[-87.775807,36.907884],[-87.777903,36.909089],[-87.78146,36.911146],[-87.782665,36.911845],[-87.785125,36.913267],[-87.787607,36.9147],[-87.789405,36.915743],[-87.792748,36.917669],[-87.79474,36.918819],[-87.795475,36.91926],[-87.797586,36.920567],[-87.798825,36.921335],[-87.80226,36.923473],[-87.805305,36.925354],[-87.806797,36.926251],[-87.807629,36.926769],[-87.810793,36.928749],[-87.812297,36.929685],[-87.818011,36.93322],[-87.819177,36.933981],[-87.819843,36.934468],[-87.821
658,36.935807],[-87.823295,36.937025],[-87.824512,36.93792],[-87.826431,36.939344],[-87.830994,36.942711],[-87.831682,36.943212],[-87.834633,36.945396],[-87.835529,36.94605],[-87.83666,36.946892],[-87.838312,36.948112],[-87.839051,36.94865],[-87.840248,36.949522],[-87.841728,36.950618],[-87.842937,36.951494],[-87.846217,36.953768],[-87.848189,36.955135],[-87.84973,36.956201],[-87.850635,36.956813],[-87.851418,36.957317],[-87.851957,36.957646],[-87.852516,36.957971],[-87.853169,36.958343],[-87.853846,36.958711],[-87.855123,36.959355],[-87.85582,36.95969],[-87.856387,36.959951],[-87.857132,36.960271],[-87.857762,36.960532],[-87.858484,36.960822],[-87.85942,36.961174],[-87.860924,36.961739],[-87.862778,36.962434],[-87.864789,36.963183],[-87.86651,36.963835],[-87.86896,36.96475],[-87.875493,36.967212],[-87.878406,36.968293],[-87.879327,36.968643],[-87.880866,36.969218],[-87.886376,36.971284],[-87.887692,36.971778],[-87.888743,36.972171],[-87.889742,36.972548],[-87.891252,36.97313],[-87.892422,36.973571],[-87.893235,36.973873],[-87.894664,36.974405],[-87.896292,36.975015],[-87.898027,36.97565],[-87.89935,36.976161],[-87.902406,36.977305],[-87.903636,36.977762],[-87.904448,36.978069],[-87.905668,36.978526],[-87.906895,36.978988],[-87.907712,36.979293],[-87.909344,36.979902],[-87.910494,36.980326],[-87.91092,36.980488],[-87.913804,36.981575],[-87.915224,36.982107],[-87.916447,36.982565],[-87.917667,36.983024],[-87.919497,36.983709],[-87.920927,36.984243],[-87.923771,36.985311],[-87.925,36.985769],[-87.926439,36.986312],[-87.927875,36.986847],[-87.928474,36.987072],[-87.929906,36.987609],[-87.931752,36.988299],[-87.933581,36.988982],[-87.936843,36.990205],[-87.937863,36.990589],[-87.939278,36.99111],[-87.940294,36.991489],[-87.941524,36.991946],[-87.943016,36.992519],[-87.943538,36.992719],[-87.943942,36.992879],[-87.944342,36.993045],[-87.944741,36.993214],[-87.945139,36.993387],[-87.945728,36.993654],[-87.946314,36.993929],[-87.946703,36.994117],[-87.947283,36.994406],[-87.947664,36.994602],[-87.948606,36.995092],[-87.949553,36.995579],[-87.950312,36.995975],[-87.951079,36.996369],[-87.95165,36.996666],[-87.952027,36.996861],[-87.95278,36.997253],[-87.953537,36.997645],[-87.954105,36.997941],[-87.95487,36.998335],[-87.955799,36.998801],[-87.956758,36.999296],[-87.95865,37.00027],[-87.960129,37.001034],[-87.961632,37.001812],[-87.962755,37.002394],[-87.963486,37.002777],[-87.963968,37.00302],[-87.965221,37.003666],[-87.965738,37.003946],[-87.966718,37.004483],[-87.967527,37.004949],[-87.968143,37.005324],[-87.968517,37.005555],[-87.968885,37.005795],[-87.96931,37.006076],[-87.96975,37.006378],[-87.970336,37.00679],[-87.970797,37.007133],[-87.971682,37.007805],[-87.972242,37.00826],[-87.972906,37.008821],[-87.973373,37.009238],[-87.973882,37.009696],[-87.974598,37.010342],[-87.975342,37.011021],[-87.97622,37.011809],[-87.976955,37.012481],[-87.977287,37.012791],[-87.978221,37.013631],[-87.978824,37.014174],[-87.979633,37.014909],[-87.980045,37.015282],[-87.980518,37.015706],[-87.981183,37.01631],[-87.983301,37.018231],[-87.984158,37.019005],[-87.984595,37.0194],[-87.984863,37.01964],[-87.985204,37.019931],[-87.985579,37.020256],[-87.985867,37.020491],[-87.986284,37.020836],[-87.986723,37.021166],[-87.987137,37.02148],[-87.987614,37.021827],[-87.988,37.022107],[-87.988441,37.022415],[-87.988746,37.022621],[-87.98914,37.022883],[-87.989532,37.023133],[-87.989903,37.023365],[-87.99044,37.023684],[-87.990845,37.023918],[-87.991382,37.024231],[-87.991767,37.024457],[-87.993489,37.025425],[-87.994966,37.
026262],[-87.996102,37.026902],[-87.997014,37.02742],[-87.997678,37.027795],[-87.998235,37.028126],[-87.998473,37.028274],[-87.998825,37.028494],[-87.999274,37.028798],[-87.999552,37.028999],[-87.999941,37.02929],[-88.000319,37.029589],[-88.000649,37.029867],[-88.00096,37.030135],[-88.001316,37.030462],[-88.001687,37.030826],[-88.001985,37.031136],[-88.002373,37.031546],[-88.002667,37.031864],[-88.003042,37.032258],[-88.004129,37.033407],[-88.00447,37.033769],[-88.004968,37.034296],[-88.005316,37.034659],[-88.00617,37.035574],[-88.006524,37.035943],[-88.007105,37.036572],[-88.00797,37.037503],[-88.008591,37.038162],[-88.008992,37.03859],[-88.010205,37.039869],[-88.010876,37.040585],[-88.011416,37.04116],[-88.011964,37.041736],[-88.012099,37.041879],[-88.012237,37.042021],[-88.012522,37.042302],[-88.012667,37.042443],[-88.012815,37.042582],[-88.012966,37.042719],[-88.013119,37.042854],[-88.013429,37.043122],[-88.013587,37.043254],[-88.013746,37.043384],[-88.013908,37.043513],[-88.014072,37.04364],[-88.014238,37.043766],[-88.014574,37.044014],[-88.014745,37.044135],[-88.015264,37.044492],[-88.016298,37.045194],[-88.016957,37.045639],[-88.017501,37.046008],[-88.018208,37.046484],[-88.019375,37.047256],[-88.021455,37.04866],[-88.022443,37.049326],[-88.023529,37.050062],[-88.024188,37.050504],[-88.02589,37.051661],[-88.027298,37.052612],[-88.028759,37.053601],[-88.029509,37.054104],[-88.031182,37.055235],[-88.033157,37.056575],[-88.034344,37.057364],[-88.035199,37.057955],[-88.036181,37.058613],[-88.037197,37.0593],[-88.038597,37.060247],[-88.0402,37.061328],[-88.041508,37.062209],[-88.042582,37.062938],[-88.04287,37.063135],[-88.043213,37.063356],[-88.043456,37.063505],[-88.043662,37.063626],[-88.043867,37.063736],[-88.044037,37.063823],[-88.044254,37.063926],[-88.04448,37.064019],[-88.044801,37.064152],[-88.045035,37.06425],[-88.045242,37.06432],[-88.045453,37.064386],[-88.045665,37.064445],[-88.045879,37.064501],[-88.046094,37.06455],[-88.046311,37.064595],[-88.046528,37.064636],[-88.046748,37.064672],[-88.04697,37.064702],[-88.047192,37.064729],[-88.047865,37.064799],[-88.048986,37.064905],[-88.050547,37.065059],[-88.052802,37.065282],[-88.054374,37.065434],[-88.056165,37.065613],[-88.058629,37.065853],[-88.061964,37.066176],[-88.065941,37.066564],[-88.067061,37.06667],[-88.067531,37.066722],[-88.068424,37.066811],[-88.069337,37.066899],[-88.069685,37.066947],[-88.069973,37.06699],[-88.070193,37.067022],[-88.070411,37.067057],[-88.070629,37.067094],[-88.071065,37.067173],[-88.071281,37.067216],[-88.071497,37.067262],[-88.071713,37.06731],[-88.071928,37.06736],[-88.072354,37.067465],[-88.072567,37.06752],[-88.072991,37.067638],[-88.073412,37.067763],[-88.074039,37.06796],[-88.074268,37.068034],[-88.074872,37.068229],[-88.075291,37.068362],[-88.075854,37.068547],[-88.076131,37.068637],[-88.076961,37.068911],[-88.077587,37.069112],[-88.078214,37.069314],[-88.07926,37.069652],[-88.080252,37.069967],[-88.081155,37.070256],[-88.082429,37.070678],[-88.083855,37.071149],[-88.085316,37.071625],[-88.086153,37.071899],[-88.086576,37.072032],[-88.086788,37.072096],[-88.087002,37.072158],[-88.087218,37.072215],[-88.087436,37.072269],[-88.087655,37.07232],[-88.087876,37.072368],[-88.088098,37.072412],[-88.088321,37.072451],[-88.088545,37.072487],[-88.088768,37.072516],[-88.088992,37.072542],[-88.089215,37.072564],[-88.089439,37.072583],[-88.089662,37.072597],[-88.089885,37.072606],[-88.090127,37.072611],[-88.090331,37.072613],[-88.090555,37.072609],[-88.090779,37.072603],[-88.091223,37.072578],[-88.091444
,37.072563],[-88.091886,37.072529],[-88.092768,37.072452],[-88.093886,37.072351],[-88.094562,37.07229],[-88.096144,37.07215],[-88.09705,37.072067],[-88.098338,37.071957],[-88.099041,37.07189],[-88.100141,37.07179],[-88.102939,37.071539],[-88.104953,37.071354],[-88.10672,37.071195],[-88.107502,37.071134],[-88.107937,37.071112],[-88.10836,37.071109],[-88.108765,37.071116],[-88.109146,37.071143],[-88.109676,37.071192],[-88.110092,37.071249],[-88.110481,37.071315],[-88.110819,37.071384],[-88.111253,37.071485],[-88.111777,37.071617],[-88.113888,37.072142],[-88.115399,37.07252],[-88.11695,37.072908],[-88.119691,37.073593],[-88.121054,37.07395],[-88.122075,37.074203],[-88.123213,37.074478],[-88.124834,37.074875],[-88.125517,37.075048],[-88.128011,37.075672],[-88.128832,37.07587],[-88.129243,37.075971],[-88.129732,37.076076],[-88.130227,37.076159],[-88.13064,37.076211],[-88.131144,37.076261],[-88.131599,37.07628],[-88.132182,37.076287],[-88.132631,37.076273],[-88.133186,37.076233],[-88.13362,37.076182],[-88.134042,37.07612],[-88.134436,37.076044],[-88.134953,37.075928],[-88.135374,37.075818],[-88.135943,37.075636],[-88.136342,37.075487],[-88.136871,37.075268],[-88.137314,37.075057],[-88.137743,37.074822],[-88.138175,37.074572],[-88.140057,37.073369],[-88.140504,37.073079],[-88.1413,37.07257],[-88.141788,37.072264],[-88.142519,37.071802],[-88.14329,37.071339],[-88.143613,37.07117],[-88.143997,37.070991],[-88.144428,37.070828],[-88.144881,37.070674],[-88.145381,37.070541],[-88.145776,37.070454],[-88.146231,37.070381],[-88.147298,37.070235],[-88.148095,37.070132],[-88.152519,37.069591],[-88.155317,37.069245],[-88.157205,37.069012],[-88.158299,37.068869],[-88.159065,37.068748],[-88.159862,37.068596],[-88.160427,37.06847],[-88.161101,37.068304],[-88.161797,37.068105],[-88.162358,37.067926],[-88.162881,37.06775],[-88.163369,37.067569],[-88.164208,37.067229],[-88.166162,37.066403],[-88.168672,37.065342],[-88.17119,37.064276],[-88.173575,37.063273],[-88.175032,37.062664],[-88.176107,37.062222],[-88.176672,37.062024],[-88.177106,37.061881],[-88.17762,37.061735],[-88.178129,37.061609],[-88.178589,37.061499],[-88.179185,37.061377],[-88.179795,37.061277],[-88.180519,37.06118],[-88.181073,37.061129],[-88.181594,37.061091],[-88.18228,37.061067],[-88.182645,37.061063],[-88.183178,37.061069],[-88.183628,37.061086],[-88.184012,37.06111],[-88.184514,37.061155],[-88.185026,37.06121],[-88.185507,37.061277],[-88.185985,37.061353],[-88.186497,37.061449],[-88.186947,37.061548],[-88.187477,37.061682],[-88.188038,37.061838],[-88.189089,37.062158],[-88.189766,37.062357],[-88.191027,37.062756],[-88.194301,37.063755],[-88.195027,37.063969],[-88.195444,37.064082],[-88.195918,37.064197],[-88.196431,37.064292],[-88.197002,37.064376],[-88.197545,37.06442],[-88.198102,37.064442],[-88.198746,37.064448],[-88.199394,37.064426],[-88.199825,37.064387],[-88.200754,37.064232],[-88.201157,37.064148],[-88.209006,37.062257],[-88.216038,37.060556],[-88.221537,37.059222],[-88.223151,37.058838],[-88.223362,37.058792],[-88.223569,37.058745],[-88.2246,37.058521],[-88.225218,37.058389],[-88.226044,37.058222],[-88.226173,37.058197],[-88.226871,37.058061],[-88.227703,37.057908],[-88.228535,37.057761],[-88.228953,37.05769],[-88.229483,37.057594],[-88.229786,37.057541],[-88.230202,37.057464],[-88.230615,37.05738],[-88.231027,37.057291],[-88.231232,37.057245],[-88.231642,37.057149],[-88.231846,37.057098],[-88.232456,37.056937],[-88.232861,37.056824],[-88.23329,37.056699],[-88.233838,37.05653],[-88.234217,37.056409],[-88.234663,37.056247],[-88.235254,3
7.056029],[-88.235642,37.055878],[-88.236026,37.055724],[-88.236405,37.055565],[-88.23678,37.055402],[-88.237179,37.055219],[-88.237348,37.055135],[-88.237709,37.054967],[-88.238069,37.054791],[-88.238427,37.054607],[-88.238782,37.054417],[-88.238958,37.054321],[-88.239307,37.054124],[-88.23965,37.053922],[-88.239991,37.053715],[-88.24067,37.053298],[-88.241011,37.053087],[-88.242378,37.052237],[-88.24272,37.052029],[-88.243063,37.051824],[-88.243408,37.051623],[-88.243583,37.051524],[-88.243937,37.05133],[-88.244294,37.051139],[-88.244838,37.050859],[-88.245202,37.050676],[-88.245441,37.050561],[-88.245754,37.050415],[-88.246313,37.050162],[-88.246499,37.050079],[-88.246959,37.049881],[-88.247254,37.049758],[-88.247638,37.049605],[-88.248024,37.049456],[-88.248605,37.049238],[-88.249184,37.049022],[-88.25035,37.048595],[-88.250934,37.048379],[-88.25191,37.048023],[-88.252493,37.047805],[-88.253274,37.047518],[-88.254637,37.047013],[-88.255555,37.046663],[-88.256303,37.046385],[-88.257555,37.045945],[-88.258139,37.045726],[-88.258425,37.045617],[-88.259109,37.045355],[-88.259492,37.045204],[-88.259873,37.045049],[-88.260442,37.04481],[-88.261006,37.044568],[-88.261157,37.044502],[-88.261587,37.044299],[-88.262459,37.043898],[-88.263029,37.04363],[-88.2636,37.043354],[-88.264328,37.042996],[-88.265059,37.042651],[-88.265402,37.042436],[-88.266104,37.042038],[-88.266457,37.04184],[-88.266977,37.041536],[-88.267663,37.041126],[-88.267833,37.041023],[-88.268169,37.040812],[-88.268837,37.040376],[-88.269007,37.040278],[-88.269506,37.039953],[-88.27,37.039621],[-88.270489,37.039284],[-88.270974,37.038941],[-88.271295,37.038708],[-88.271614,37.038474],[-88.271942,37.03823],[-88.272558,37.037757],[-88.273172,37.03727],[-88.273477,37.037023],[-88.273931,37.03665],[-88.274375,37.036272],[-88.283614,37.028166],[-88.288469,37.023912],[-88.292496,37.020395],[-88.29357,37.019465],[-88.293912,37.019169],[-88.294465,37.018746],[-88.294569,37.018667],[-88.294834,37.018481],[-88.295108,37.018301],[-88.295385,37.018126],[-88.295671,37.017957],[-88.295962,37.017795],[-88.296239,37.017643],[-88.296694,37.017401],[-88.297166,37.017195],[-88.297804,37.016938],[-88.298452,37.016703],[-88.299113,37.016493],[-88.299789,37.016315],[-88.300475,37.016165],[-88.30185,37.015899],[-88.30771,37.014809],[-88.308739,37.014608],[-88.309081,37.014535],[-88.309759,37.014375],[-88.310095,37.014288],[-88.310761,37.0141],[-88.311419,37.01389],[-88.312226,37.013603],[-88.312591,37.013466],[-88.31302,37.013292],[-88.313638,37.013018],[-88.314248,37.012729],[-88.314842,37.012422],[-88.31542,37.012096],[-88.315987,37.011757],[-88.316538,37.011401],[-88.317071,37.011028],[-88.317591,37.010642],[-88.318089,37.01024],[-88.318569,37.009824],[-88.319033,37.009396],[-88.319256,37.009175],[-88.31969,37.008727],[-88.321773,37.006455],[-88.324722,37.003248],[-88.325117,37.00281],[-88.325489,37.002387],[-88.326164,37.00164],[-88.327856,36.999765],[-88.329841,36.997595],[-88.332444,36.994765],[-88.332663,36.994539],[-88.333125,36.994105],[-88.333369,36.993898],[-88.333622,36.993697],[-88.333885,36.993505],[-88.334156,36.993319],[-88.334435,36.993142],[-88.334724,36.992973],[-88.335019,36.992814],[-88.335322,36.992663],[-88.335631,36.992522],[-88.335946,36.99239],[-88.336267,36.992266],[-88.336592,36.992154],[-88.336922,36.992051],[-88.337255,36.991959],[-88.337591,36.991877],[-88.337932,36.991805],[-88.338275,36.991744],[-88.338623,36.991692],[-88.339338,36.991606],[-88.346965,36.990792],[-88.347578,36.990725],[-88.348074,36.990732],[-88.348331,
36.990731],[-88.348587,36.990743],[-88.348842,36.990771],[-88.349086,36.990818],[-88.34931,36.990888],[-88.34951,36.990982],[-88.349679,36.991098],[-88.349812,36.991232],[-88.349912,36.991383],[-88.349975,36.991543],[-88.350007,36.991707],[-88.350006,36.99187],[-88.349973,36.992029],[-88.349908,36.99218],[-88.349808,36.992313],[-88.34968,36.992423],[-88.349526,36.992506],[-88.349356,36.992564],[-88.349173,36.992599],[-88.348988,36.992614],[-88.348804,36.99261],[-88.348625,36.992584],[-88.348455,36.992537],[-88.348297,36.992472],[-88.348154,36.99239],[-88.348029,36.992298],[-88.347927,36.992193],[-88.34784,36.992081],[-88.347761,36.991959],[-88.347688,36.991822],[-88.347625,36.99167],[-88.347524,36.99133],[-88.347454,36.991143],[-88.347319,36.99038],[-88.346625,36.986174],[-88.34654,36.985674],[-88.346471,36.985243],[-88.346458,36.985171],[-88.346375,36.984667],[-88.346334,36.984413],[-88.346289,36.984158],[-88.346159,36.983387],[-88.346076,36.982871],[-88.346036,36.982613],[-88.346,36.982355],[-88.345966,36.982098],[-88.345936,36.981841],[-88.345908,36.981585],[-88.345882,36.981329],[-88.345859,36.981072],[-88.345822,36.98056],[-88.345793,36.98005],[-88.345784,36.979795],[-88.345771,36.979026],[-88.345772,36.978769],[-88.345781,36.978253],[-88.345788,36.977995],[-88.3458,36.977737],[-88.345815,36.97748],[-88.345832,36.977222],[-88.345851,36.976964],[-88.345896,36.976447],[-88.345925,36.976188],[-88.345957,36.97593],[-88.345988,36.975671],[-88.346023,36.975412],[-88.346062,36.975154],[-88.346103,36.974895],[-88.346148,36.974638],[-88.346192,36.974379],[-88.346285,36.973858],[-88.346429,36.97307],[-88.346479,36.972806],[-88.346675,36.971746],[-88.346718,36.97151],[-88.34682,36.970949],[-88.346909,36.970402],[-88.348963,36.959346],[-88.349056,36.95883],[-88.349106,36.958572],[-88.349155,36.958313],[-88.34925,36.957793],[-88.349298,36.95752],[-88.349345,36.957272],[-88.349393,36.957012],[-88.34944,36.95675],[-88.349486,36.956487],[-88.349582,36.955963],[-88.349642,36.955699],[-88.349684,36.955439],[-88.349734,36.95518],[-88.34978,36.95492],[-88.349816,36.954734],[-88.349828,36.954661],[-88.349877,36.954401],[-88.349972,36.953879],[-88.350016,36.953618],[-88.350057,36.953357],[-88.350095,36.953093],[-88.350123,36.952852],[-88.350152,36.952563],[-88.350175,36.952296],[-88.350191,36.952029],[-88.350203,36.951762],[-88.350205,36.951495],[-88.350203,36.95123],[-88.350196,36.950965],[-88.350184,36.950701],[-88.350166,36.950437],[-88.350143,36.950175],[-88.350114,36.949914],[-88.35008,36.949653],[-88.350043,36.949393],[-88.35,36.949135],[-88.349952,36.948878],[-88.349899,36.948623],[-88.349838,36.948369],[-88.349773,36.948117],[-88.349701,36.947866],[-88.349626,36.947616],[-88.349546,36.947367],[-88.349462,36.947118],[-88.349371,36.946869],[-88.349273,36.946618],[-88.34917,36.946369],[-88.349064,36.946119],[-88.34895,36.945869],[-88.348831,36.94562],[-88.348706,36.945372],[-88.348574,36.945124],[-88.348439,36.944876],[-88.3483,36.944628],[-88.348162,36.944377],[-88.348026,36.944125],[-88.347888,36.943873],[-88.347748,36.94362],[-88.34761,36.943367],[-88.34719,36.942611],[-88.347052,36.94236],[-88.346913,36.94211],[-88.346639,36.941611],[-88.346501,36.941362],[-88.346365,36.941113],[-88.346229,36.940869],[-88.34609,36.940617],[-88.345954,36.94037],[-88.345673,36.939867],[-88.345408,36.939387],[-88.345141,36.938899],[-88.345006,36.938654],[-88.344869,36.938409],[-88.344734,36.938163],[-88.344597,36.937916],[-88.344462,36.937669],[-88.344322,36.937423],[-88.344045,36.936927],[-88.34377,36.93643],[-88.343
636,36.936181],[-88.343506,36.935933],[-88.343388,36.935683],[-88.343283,36.935431],[-88.343189,36.935176],[-88.343116,36.934917],[-88.343062,36.934656],[-88.343025,36.934394],[-88.343002,36.934132],[-88.342998,36.93387],[-88.343012,36.933608],[-88.343042,36.933348],[-88.343087,36.933091],[-88.343148,36.932835],[-88.343224,36.932582],[-88.343315,36.932331],[-88.343425,36.932085],[-88.343552,36.931843],[-88.343691,36.931605],[-88.343774,36.931479],[-88.343913,36.931302],[-88.34444,36.930741],[-88.345316,36.929894],[-88.345693,36.929601],[-88.346882,36.92883],[-88.347039,36.928642],[-88.347097,36.928548],[-88.346648,36.928158],[-88.346269,36.927827],[-88.345835,36.927473],[-88.345038,36.926764],[-88.344361,36.926173],[-88.343776,36.925671],[-88.342393,36.924496],[-88.342038,36.924189],[-88.341416,36.923643],[-88.340869,36.923092],[-88.340393,36.922521],[-88.339628,36.92148],[-88.339395,36.921157],[-88.338765,36.920313],[-88.338327,36.919748],[-88.338151,36.919582],[-88.33805,36.919466],[-88.33794,36.919352],[-88.337823,36.919236],[-88.337698,36.919122],[-88.337569,36.919011],[-88.337434,36.918903],[-88.337294,36.918797],[-88.337147,36.918693],[-88.336994,36.918591],[-88.336838,36.918491],[-88.336674,36.918394],[-88.336508,36.918299],[-88.336337,36.918207],[-88.335988,36.918024],[-88.334904,36.917463],[-88.334718,36.917368],[-88.334347,36.917173],[-88.334163,36.917071],[-88.333978,36.916966],[-88.333797,36.916858],[-88.333619,36.916747],[-88.333443,36.916636],[-88.333273,36.916524],[-88.333106,36.916411],[-88.332941,36.916294],[-88.332776,36.916172],[-88.332612,36.916048],[-88.332453,36.915923],[-88.332299,36.915797],[-88.332147,36.915671],[-88.332001,36.915544],[-88.331859,36.915417],[-88.331723,36.915289],[-88.33159,36.915162],[-88.331459,36.915032],[-88.331338,36.914907],[-88.331205,36.914768],[-88.331086,36.914639],[-88.33064,36.914137],[-88.330241,36.913674],[-88.329982,36.913363],[-88.329918,36.913286],[-88.329811,36.913149],[-88.329707,36.91301],[-88.329603,36.912868],[-88.3295,36.912723],[-88.3294,36.912575],[-88.329301,36.912423],[-88.329203,36.912266],[-88.329107,36.912107],[-88.329012,36.911947],[-88.328828,36.91163],[-88.328735,36.911467],[-88.32864,36.911303],[-88.328542,36.911137],[-88.328446,36.910971],[-88.328258,36.910641],[-88.328064,36.910308],[-88.327969,36.910139],[-88.327871,36.909969],[-88.327558,36.909433],[-88.327337,36.909049],[-88.326618,36.907819],[-88.326495,36.907606],[-88.32637,36.907392],[-88.326122,36.906963],[-88.326003,36.906751],[-88.32588,36.906536],[-88.325757,36.906324],[-88.32563,36.906113],[-88.325499,36.905904],[-88.325364,36.905697],[-88.325227,36.905493],[-88.325086,36.905292],[-88.324942,36.905093],[-88.324794,36.904895],[-88.324645,36.904702],[-88.324494,36.904512],[-88.32434,36.904325],[-88.324186,36.904142],[-88.324033,36.903966],[-88.323883,36.903797],[-88.323796,36.903703],[-88.323729,36.90363],[-88.323566,36.903461],[-88.323397,36.90329],[-88.323289,36.903183],[-88.323225,36.90312],[-88.323047,36.902949],[-88.322864,36.902778],[-88.322487,36.902443],[-88.322378,36.902351],[-88.322266,36.902256],[-88.322099,36.902116],[-88.321899,36.901956],[-88.321697,36.901799],[-88.321489,36.901643],[-88.321279,36.901488],[-88.321065,36.901335],[-88.320848,36.901186],[-88.320628,36.90104],[-88.320378,36.900874],[-88.320182,36.900755],[-88.319952,36.900616],[-88.319718,36.900478],[-88.319481,36.900341],[-88.319239,36.900207],[-88.318993,36.900075],[-88.318741,36.899945],[-88.318487,36.899818],[-88.318231,36.899694],[-88.317969,36.899572],[-88.317439,36.899334
],[-88.317347,36.899294],[-88.317168,36.899216],[-88.316897,36.899096],[-88.316071,36.898739],[-88.315248,36.89838],[-88.314976,36.898259],[-88.314802,36.898183],[-88.314702,36.898139],[-88.313322,36.897547],[-88.313038,36.89743],[-88.312751,36.897316],[-88.312461,36.897206],[-88.312167,36.8971],[-88.31187,36.896998],[-88.31157,36.8969],[-88.311267,36.896804],[-88.310654,36.896621],[-88.309736,36.896356],[-88.309436,36.896271],[-88.308848,36.896106],[-88.30742,36.895697],[-88.306873,36.895542],[-88.306603,36.895463],[-88.306333,36.895379],[-88.306063,36.89529],[-88.305794,36.895194],[-88.305527,36.895092],[-88.305266,36.894984],[-88.305009,36.894868],[-88.304762,36.894747],[-88.30452,36.894622],[-88.304286,36.894492],[-88.304055,36.89436],[-88.303955,36.8943],[-88.30383,36.894224],[-88.303605,36.89409],[-88.303382,36.893954],[-88.303156,36.893818],[-88.302711,36.893544],[-88.302273,36.893273],[-88.301838,36.893007],[-88.301622,36.892877],[-88.300747,36.892354],[-88.300527,36.892224],[-88.300305,36.892096],[-88.300079,36.891968],[-88.299849,36.891843],[-88.299611,36.891719],[-88.299371,36.891597],[-88.298889,36.891359],[-88.298649,36.891243],[-88.29841,36.891129],[-88.298168,36.891017],[-88.297696,36.890803],[-88.297466,36.890701],[-88.296608,36.890326],[-88.296227,36.890158],[-88.295447,36.88981],[-88.295098,36.889657],[-88.295048,36.889724],[-88.294983,36.889822],[-88.294873,36.889983],[-88.294803,36.890084],[-88.294543,36.890465],[-88.294336,36.890761],[-88.294226,36.89092],[-88.293768,36.891593],[-88.293408,36.892118],[-88.293285,36.892296],[-88.293041,36.892657],[-88.292917,36.892839],[-88.29279,36.893021],[-88.292666,36.893203],[-88.292544,36.893385],[-88.292421,36.893565],[-88.292176,36.893916],[-88.292056,36.894091],[-88.291812,36.894443],[-88.291694,36.894618],[-88.291575,36.894791],[-88.291348,36.895132],[-88.291231,36.895305],[-88.291109,36.89548],[-88.290987,36.895658],[-88.29086,36.895838],[-88.290728,36.896012],[-88.2906,36.896196],[-88.290466,36.89637],[-88.290328,36.896541],[-88.290185,36.896707],[-88.290035,36.896865],[-88.289709,36.897162],[-88.289534,36.897301],[-88.289352,36.897429],[-88.289162,36.897548],[-88.288965,36.897658],[-88.288762,36.89776],[-88.288556,36.897859],[-88.288345,36.897954],[-88.288131,36.898048],[-88.2877,36.898233],[-88.287483,36.898319],[-88.28737,36.898367],[-88.287268,36.89841],[-88.287052,36.898507],[-88.286375,36.898788],[-88.286142,36.898882],[-88.285912,36.898977],[-88.285227,36.899265],[-88.284766,36.899461],[-88.284533,36.899561],[-88.284063,36.899761],[-88.28383,36.899861],[-88.283362,36.90006],[-88.28313,36.90016],[-88.282897,36.900259],[-88.282433,36.900454],[-88.281488,36.900856],[-88.281021,36.901051],[-88.280794,36.901147],[-88.280571,36.901245],[-88.280359,36.901347],[-88.280161,36.901455],[-88.279976,36.901569],[-88.279785,36.901697],[-88.279622,36.901815],[-88.279453,36.901949],[-88.279289,36.902089],[-88.279134,36.902235],[-88.278987,36.902386],[-88.278849,36.902539],[-88.278773,36.902628],[-88.278718,36.902694],[-88.278592,36.902852],[-88.278212,36.903341],[-88.277977,36.903637],[-88.277945,36.903675],[-88.277677,36.904011],[-88.277544,36.90418],[-88.277406,36.904348],[-88.277342,36.904424],[-88.27713,36.904684],[-88.276867,36.905022],[-88.276603,36.905355],[-88.276468,36.905516],[-88.27633,36.905671],[-88.276188,36.905818],[-88.27604,36.905955],[-88.275885,36.906084],[-88.275727,36.906199],[-88.275573,36.9063],[-88.275424,36.906394],[-88.275283,36.90649],[-88.275138,36.906577],[-88.274987,36.906656],[-88.274827,36.906734],[-88.2
74657,36.906812],[-88.274437,36.906908],[-88.274285,36.906973],[-88.274083,36.907052],[-88.273871,36.907118],[-88.273666,36.907219],[-88.273455,36.907317],[-88.273238,36.907398],[-88.27302,36.907485],[-88.272807,36.907574],[-88.272594,36.907659],[-88.27239,36.907746],[-88.272191,36.907837],[-88.272008,36.907935],[-88.271826,36.908046],[-88.271661,36.908167],[-88.271511,36.908304],[-88.27138,36.908454],[-88.271267,36.908616],[-88.271172,36.908787],[-88.271097,36.908965],[-88.27104,36.90915],[-88.270997,36.909336],[-88.27096,36.909522],[-88.270928,36.909707],[-88.270894,36.909894],[-88.27082,36.910264],[-88.270753,36.910623],[-88.270718,36.910801],[-88.270685,36.910977],[-88.270651,36.911152],[-88.270587,36.911511],[-88.270553,36.911693],[-88.270507,36.911873],[-88.270439,36.912044],[-88.270346,36.912205],[-88.27023,36.912354],[-88.270162,36.912423],[-88.270094,36.91249],[-88.269946,36.912613],[-88.269787,36.912724],[-88.26962,36.912827],[-88.269263,36.913029],[-88.269074,36.913129],[-88.268893,36.913242],[-88.268818,36.91327],[-88.268703,36.913314],[-88.268501,36.913387],[-88.268296,36.91344],[-88.268088,36.913468],[-88.267871,36.91348],[-88.267652,36.913473],[-88.267429,36.913456],[-88.266972,36.913412],[-88.266743,36.913392],[-88.266515,36.913383],[-88.266296,36.91339],[-88.266081,36.913418],[-88.265872,36.913466],[-88.265671,36.913537],[-88.265477,36.913621],[-88.265312,36.913739],[-88.265162,36.913867],[-88.265022,36.914012],[-88.264885,36.914174],[-88.264744,36.914347],[-88.264602,36.914524],[-88.264461,36.914702],[-88.264321,36.914885],[-88.264182,36.915069],[-88.264039,36.915253],[-88.263898,36.915437],[-88.263754,36.91562],[-88.263617,36.915801],[-88.263464,36.915963],[-88.263331,36.916131],[-88.263187,36.916282],[-88.263019,36.916413],[-88.262827,36.916523],[-88.262617,36.916614],[-88.262392,36.91668],[-88.262157,36.916743],[-88.261916,36.916789],[-88.261669,36.916831],[-88.261413,36.91688],[-88.261162,36.916923],[-88.260908,36.916969],[-88.260652,36.917023],[-88.260407,36.91708],[-88.260168,36.917148],[-88.259937,36.917225],[-88.259715,36.917312],[-88.259506,36.917412],[-88.259309,36.917529],[-88.25913,36.917663],[-88.258963,36.917807],[-88.258809,36.91795],[-88.258664,36.918112],[-88.258537,36.918277],[-88.258422,36.918447],[-88.258321,36.918624],[-88.258231,36.918807],[-88.258153,36.918991],[-88.258082,36.91918],[-88.258013,36.91937],[-88.257926,36.919559],[-88.257903,36.9196],[-88.257821,36.919741],[-88.257701,36.919917],[-88.257567,36.920083],[-88.257417,36.920241],[-88.257255,36.920389],[-88.257078,36.920531],[-88.256884,36.920662],[-88.256679,36.920778],[-88.25646,36.920882],[-88.256233,36.920972],[-88.255997,36.921045],[-88.255756,36.921102],[-88.25551,36.921146],[-88.255367,36.921164],[-88.255261,36.921177],[-88.25474,36.921231],[-88.25421,36.921283],[-88.253946,36.921306],[-88.253682,36.921326],[-88.253412,36.921336],[-88.253144,36.92135],[-88.252942,36.921356],[-88.252869,36.921358],[-88.252595,36.921369],[-88.252327,36.921372],[-88.2518,36.921383],[-88.251329,36.921387],[-88.251042,36.921388],[-88.250301,36.921394],[-88.249542,36.921406],[-88.249287,36.921407],[-88.249028,36.92141],[-88.248768,36.921418],[-88.24851,36.92142],[-88.248256,36.921417],[-88.247995,36.921424],[-88.247696,36.921424],[-88.247438,36.92142],[-88.247417,36.921502],[-88.247405,36.92156],[-88.247393,36.921627],[-88.24734,36.921964],[-88.247318,36.922141]]},"steps":[{"maneuver":{"type":"depart","location":{"type":"Point","coordinates":[-87.337733,36.539006]},"instruction":"Head southwest on US 
79"},"distance":51,"duration":11,"way_name":"US 79","direction":"SW","heading":230,"mode":"driving"},{"maneuver":{"type":"u-turn","location":{"type":"Point","coordinates":[-87.338282,36.53888]},"instruction":"Make a U-turn onto Kraft Street (US 79)"},"distance":66,"duration":5,"way_name":"Kraft Street (US 79)","direction":"E","heading":106,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-87.337557,36.53874]},"instruction":"Turn left onto Wilma Rudolph Boulevard (US 79;SR 48)"},"distance":3598,"duration":208,"way_name":"Wilma Rudolph Boulevard (US 79;SR 48)","direction":"N","heading":9,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.31255,36.563005]},"instruction":"Continue on Wilma Rudolph Boulevard (US 79)"},"distance":5166,"duration":272,"way_name":"Wilma Rudolph Boulevard (US 79)","direction":"NE","heading":48,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-87.281068,36.601171]},"instruction":"Turn left"},"distance":12,"duration":3,"way_name":"","direction":"NW","heading":307,"mode":"driving"},{"maneuver":{"type":"bear right","location":{"type":"Point","coordinates":[-87.281183,36.601239]},"instruction":"Bear right"},"distance":47,"duration":4,"way_name":"","direction":"N","heading":355,"mode":"driving"},{"maneuver":{"type":"bear left","location":{"type":"Point","coordinates":[-87.281408,36.601615]},"instruction":"Bear left"},"distance":445,"duration":34,"way_name":"","direction":"NW","heading":295,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.285551,36.603764]},"instruction":"Continue on I 24"},"distance":90158,"duration":3210,"way_name":"I 24","direction":"NW","heading":325,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.088768,37.072516]},"instruction":"Continue on I 24;I 69"},"distance":26416,"duration":940,"way_name":"I 24;I 69","direction":"W","heading":278,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.347578,36.990725]},"instruction":"Continue"},"distance":660,"duration":51,"way_name":"","direction":"W","heading":271,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.347454,36.991143]},"instruction":"Continue on Purchase Parkway"},"distance":6805,"duration":296,"way_name":"Purchase Parkway","direction":"S","heading":172,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.343774,36.931479]},"instruction":"Continue"},"distance":432,"duration":33,"way_name":"","direction":"SW","heading":212,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.347039,36.928642]},"instruction":"Continue"},"distance":11,"duration":2,"way_name":"","direction":"SW","heading":209,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-88.347097,36.928548]},"instruction":"Turn left onto US 68"},"distance":436,"duration":25,"way_name":"US 68","direction":"SE","heading":137,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.343776,36.925671]},"instruction":"Continue on US 68;US 641"},"distance":596,"duration":34,"way_name":"US 68;US 641","direction":"SE","heading":137,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.339628,36.92148]},"instruction":"Continue on US 
68"},"distance":5496,"duration":316,"way_name":"US 68","direction":"SE","heading":150,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-88.295098,36.889657]},"instruction":"Turn left onto Moors Camp Highway"},"distance":6076,"duration":504,"way_name":"Moors Camp Highway","direction":"NE","heading":28,"mode":"driving"},{"maneuver":{"type":"arrive","location":{"type":"Point","coordinates":[-88.247318,36.922141]},"instruction":"You have arrived at your destination"}}]},{"distance":129995,"duration":6028,"summary":"I 24 - Canton Road (US 68;KY 80)","geometry":{"type":"LineString","coordinates":[[-87.337733,36.539006],[-87.337802,36.538969],[-87.337908,36.53893],[-87.338082,36.538892],[-87.338282,36.53888],[-87.337889,36.538796],[-87.337673,36.53876],[-87.337557,36.53874],[-87.337415,36.539455],[-87.337364,36.53971],[-87.337159,36.540704],[-87.33695,36.541804],[-87.336924,36.54191],[-87.33686,36.5421],[-87.336821,36.542193],[-87.336728,36.542421],[-87.336614,36.542681],[-87.336504,36.542916],[-87.336403,36.543137],[-87.336309,36.543349],[-87.336166,36.54363],[-87.336058,36.543863],[-87.335957,36.544078],[-87.335845,36.544316],[-87.33572,36.544583],[-87.335557,36.544932],[-87.335361,36.545309],[-87.335225,36.545538],[-87.335073,36.545729],[-87.334763,36.546061],[-87.334364,36.546408],[-87.334209,36.54661],[-87.332988,36.547578],[-87.332321,36.548082],[-87.328617,36.550923],[-87.326853,36.55227],[-87.325047,36.553631],[-87.324423,36.554084],[-87.323926,36.554453],[-87.322803,36.555285],[-87.320678,36.556868],[-87.31978,36.557531],[-87.318274,36.558642],[-87.318105,36.558768],[-87.316189,36.560188],[-87.315139,36.560971],[-87.315084,36.561016],[-87.314177,36.561759],[-87.313798,36.562066],[-87.313517,36.562289],[-87.313263,36.562481],[-87.312928,36.562732],[-87.312644,36.562937],[-87.31255,36.563005],[-87.312445,36.563083],[-87.312151,36.563301],[-87.311789,36.563567],[-87.31108,36.564092],[-87.310988,36.564161],[-87.310586,36.564459],[-87.310376,36.564614],[-87.309622,36.565174],[-87.308562,36.565963],[-87.30807,36.566321],[-87.307566,36.5667],[-87.306917,36.567173],[-87.306196,36.567707],[-87.305954,36.567899],[-87.305753,36.568068],[-87.305559,36.568235],[-87.305297,36.568486],[-87.305069,36.568723],[-87.304954,36.568849],[-87.304759,36.569084],[-87.304669,36.569206],[-87.304557,36.569357],[-87.304437,36.569525],[-87.304305,36.569709],[-87.304169,36.569959],[-87.303978,36.570331],[-87.30387,36.570571],[-87.303794,36.570752],[-87.30368,36.571082],[-87.303615,36.571294],[-87.303583,36.571404],[-87.303417,36.571888],[-87.303355,36.572179],[-87.303289,36.572388],[-87.303226,36.572591],[-87.303144,36.572865],[-87.302999,36.573324],[-87.302966,36.573419],[-87.302917,36.573564],[-87.302889,36.573651],[-87.302807,36.573891],[-87.302747,36.574064],[-87.302634,36.574354],[-87.302434,36.574822],[-87.302294,36.57512],[-87.302176,36.575372],[-87.302061,36.575592],[-87.301927,36.575845],[-87.301789,36.576079],[-87.301679,36.576265],[-87.301602,36.576398],[-87.301533,36.576516],[-87.301325,36.576844],[-87.301164,36.57709],[-87.300922,36.577429],[-87.300694,36.577734],[-87.300029,36.578604],[-87.299817,36.578881],[-87.299396,36.579439],[-87.299102,36.57983],[-87.298972,36.580005],[-87.298507,36.580603],[-87.29787,36.581443],[-87.297718,36.581643],[-87.29747,36.58196],[-87.29735,36.582113],[-87.297253,36.582247],[-87.296998,36.582589],[-87.296878,36.58275],[-87.296603,36.583106],[-87.296056,36.583857],[-87.295875,36.584128],[-87.295677,36.584441],[-87.295372,36.584951],
[-87.295228,36.585213],[-87.294995,36.585625],[-87.294897,36.585803],[-87.294832,36.585934],[-87.294681,36.586204],[-87.294618,36.58632],[-87.294503,36.586534],[-87.294315,36.58687],[-87.294115,36.587233],[-87.293983,36.587471],[-87.293749,36.587889],[-87.293679,36.588012],[-87.293596,36.588165],[-87.293371,36.58853],[-87.293263,36.588693],[-87.293087,36.588959],[-87.292949,36.589154],[-87.292563,36.58968],[-87.292448,36.589825],[-87.292294,36.590015],[-87.292182,36.590147],[-87.292017,36.590345],[-87.291709,36.5907],[-87.291356,36.591072],[-87.290844,36.591627],[-87.290594,36.591889],[-87.290332,36.592173],[-87.290031,36.59249],[-87.289815,36.592725],[-87.289361,36.593216],[-87.288309,36.594342],[-87.287669,36.595015],[-87.287584,36.595105],[-87.287213,36.595517],[-87.286945,36.595798],[-87.286737,36.596022],[-87.286547,36.596225],[-87.286259,36.596529],[-87.286071,36.596721],[-87.285717,36.59707],[-87.285576,36.597203],[-87.285461,36.597319],[-87.28513,36.597646],[-87.28485,36.597797],[-87.2844,36.598197],[-87.284251,36.59835],[-87.283992,36.598576],[-87.28153,36.600762],[-87.281286,36.600978],[-87.281068,36.601171],[-87.281183,36.601239],[-87.281191,36.60131],[-87.281228,36.60141],[-87.281279,36.601478],[-87.281408,36.601615],[-87.282635,36.602077],[-87.283768,36.602552],[-87.284369,36.602849],[-87.284841,36.603173],[-87.285551,36.603764],[-87.286131,36.604419],[-87.28667,36.605037],[-87.287836,36.606355],[-87.289403,36.608135],[-87.290573,36.609457],[-87.291581,36.610599],[-87.292736,36.611907],[-87.294102,36.613451],[-87.294562,36.613979],[-87.295617,36.615177],[-87.296791,36.616513],[-87.29811,36.617997],[-87.298982,36.618987],[-87.300245,36.620418],[-87.302278,36.622723],[-87.303158,36.623712],[-87.30356,36.624158],[-87.303885,36.624483],[-87.304142,36.624718],[-87.304488,36.625002],[-87.304824,36.625249],[-87.305215,36.625522],[-87.305663,36.625784],[-87.306139,36.626046],[-87.306615,36.626272],[-87.307474,36.626631],[-87.308961,36.627232],[-87.311507,36.628248],[-87.313463,36.629043],[-87.316378,36.63021],[-87.317496,36.630656],[-87.319528,36.631472],[-87.320636,36.631915],[-87.322124,36.632514],[-87.322912,36.63283],[-87.326746,36.634365],[-87.331147,36.636131],[-87.331833,36.636431],[-87.332633,36.636802],[-87.333274,36.637122],[-87.333949,36.637499],[-87.334537,36.637839],[-87.335119,36.638202],[-87.335998,36.63879],[-87.336655,36.639273],[-87.337296,36.63977],[-87.337772,36.640164],[-87.338606,36.640932],[-87.339251,36.641543],[-87.339702,36.641967],[-87.341759,36.643892],[-87.342212,36.644304],[-87.342547,36.644588],[-87.342855,36.644831],[-87.343112,36.64503],[-87.343337,36.645191],[-87.343836,36.645526],[-87.344227,36.645769],[-87.344588,36.645979],[-87.345011,36.646202],[-87.345502,36.646446],[-87.345956,36.646651],[-87.346628,36.646928],[-87.347315,36.647174],[-87.351457,36.648682],[-87.355412,36.650126],[-87.358047,36.651086],[-87.359198,36.65151],[-87.360299,36.651912],[-87.361184,36.652231],[-87.361537,36.652368],[-87.362136,36.652603],[-87.362517,36.652782],[-87.363075,36.653039],[-87.363567,36.653285],[-87.364006,36.653521],[-87.364369,36.653726],[-87.364754,36.653955],[-87.365059,36.654146],[-87.365546,36.654462],[-87.366332,36.655023],[-87.366888,36.655424],[-87.368545,36.656625],[-87.369431,36.657256],[-87.37017,36.657795],[-87.372173,36.659246],[-87.374249,36.660745],[-87.376123,36.662102],[-87.378443,36.663777],[-87.38052,36.665287],[-87.382593,36.666778],[-87.383424,36.667382],[-87.38421,36.667967],[-87.384845,36.668502],[-87.386221,36.669717],[-87.388164,36.671
44],[-87.390006,36.673066],[-87.39192,36.674767],[-87.393591,36.676232],[-87.394044,36.6766],[-87.394506,36.676946],[-87.395113,36.677375],[-87.395722,36.677773],[-87.39614,36.678035],[-87.39648,36.678228],[-87.397258,36.678645],[-87.397843,36.678931],[-87.399284,36.679603],[-87.400008,36.679942],[-87.401538,36.680648],[-87.403989,36.681789],[-87.406342,36.68288],[-87.410115,36.684642],[-87.411405,36.68524],[-87.413715,36.68631],[-87.416128,36.687429],[-87.418524,36.68854],[-87.41967,36.68908],[-87.42077,36.689579],[-87.421497,36.689908],[-87.422268,36.690243],[-87.42327,36.69066],[-87.424245,36.691062],[-87.427228,36.692217],[-87.429737,36.693181],[-87.432274,36.694158],[-87.435004,36.695211],[-87.437413,36.696135],[-87.439887,36.697091],[-87.442498,36.698097],[-87.44496,36.699037],[-87.447401,36.699977],[-87.448056,36.700237],[-87.448446,36.700383],[-87.452444,36.701973],[-87.454684,36.702788],[-87.455566,36.703132],[-87.459308,36.704564],[-87.461765,36.705511],[-87.462984,36.705983],[-87.464587,36.706593],[-87.46735,36.707659],[-87.467784,36.707826],[-87.472397,36.709599],[-87.474092,36.710255],[-87.474623,36.71046],[-87.475171,36.710691],[-87.475647,36.710905],[-87.476052,36.711107],[-87.476426,36.71129],[-87.476875,36.711527],[-87.477364,36.711803],[-87.477702,36.712009],[-87.478132,36.71229],[-87.478723,36.712688],[-87.479024,36.712912],[-87.479279,36.713107],[-87.479787,36.713506],[-87.480163,36.713824],[-87.480628,36.714251],[-87.481162,36.714765],[-87.481552,36.715198],[-87.482125,36.715836],[-87.482441,36.716242],[-87.482739,36.716647],[-87.483022,36.717062],[-87.483264,36.717434],[-87.483502,36.717829],[-87.483775,36.718343],[-87.485397,36.72137],[-87.486761,36.723958],[-87.487933,36.726167],[-87.488893,36.727989],[-87.489171,36.728506],[-87.489431,36.728992],[-87.48958,36.729241],[-87.489894,36.729733],[-87.490228,36.730215],[-87.490583,36.730685],[-87.490956,36.731141],[-87.491148,36.731364],[-87.491345,36.731584],[-87.491752,36.732014],[-87.492176,36.732436],[-87.492395,36.732643],[-87.49262,36.732846],[-87.492852,36.733045],[-87.493079,36.733239],[-87.493313,36.733431],[-87.49355,36.733619],[-87.49379,36.733803],[-87.494033,36.733983],[-87.49428,36.734159],[-87.494782,36.734499],[-87.495289,36.734824],[-87.498635,36.736801],[-87.502133,36.738859],[-87.506676,36.741522],[-87.508549,36.742634],[-87.517894,36.74813],[-87.52032,36.749559],[-87.528768,36.754512],[-87.529935,36.755201],[-87.54347,36.763165],[-87.544151,36.763565],[-87.558129,36.771787],[-87.574281,36.78127],[-87.578233,36.783586],[-87.583164,36.786482],[-87.583941,36.786931],[-87.584471,36.787221],[-87.585013,36.787502],[-87.585554,36.787765],[-87.585816,36.787884],[-87.586416,36.788157],[-87.586995,36.788403],[-87.587578,36.788636],[-87.59794,36.792665],[-87.598397,36.792846],[-87.598986,36.793087],[-87.599854,36.793472],[-87.600428,36.793747],[-87.600998,36.794035],[-87.601561,36.794339],[-87.602119,36.794656],[-87.602665,36.794983],[-87.603198,36.795321],[-87.603462,36.795494],[-87.603982,36.795852],[-87.60449,36.796221],[-87.604985,36.7966],[-87.605466,36.796984],[-87.614169,36.804276],[-87.61464,36.804665],[-87.615903,36.805722],[-87.617412,36.806985],[-87.620481,36.809549],[-87.62442,36.812849],[-87.624637,36.813022],[-87.624858,36.81319],[-87.625085,36.813348],[-87.625317,36.813498],[-87.625554,36.813639],[-87.625795,36.813774],[-87.626008,36.813883],[-87.626296,36.814021],[-87.626553,36.814133],[-87.626815,36.814236],[-87.62708,36.814331],[-87.627348,36.814418],[-87.627617,36.814496],[-87.627885,36.814568],
[-87.628812,36.814793],[-87.630644,36.815233],[-87.633646,36.815935],[-87.634271,36.816081],[-87.638969,36.81719],[-87.639555,36.817338],[-87.640194,36.817522],[-87.640591,36.817648],[-87.640998,36.817798],[-87.641204,36.817879],[-87.641409,36.817967],[-87.641613,36.81806],[-87.642026,36.818264],[-87.642236,36.818374],[-87.642664,36.818614],[-87.64653,36.820885],[-87.649584,36.822673],[-87.65748,36.827305],[-87.658762,36.828062],[-87.659008,36.828215],[-87.659252,36.828374],[-87.659493,36.828538],[-87.659542,36.828573],[-87.659969,36.828884],[-87.66005,36.828946],[-87.660204,36.829065],[-87.660669,36.829433],[-87.660888,36.829649],[-87.661107,36.829849],[-87.661312,36.830049],[-87.661527,36.83026],[-87.662335,36.831103],[-87.662972,36.831786],[-87.663953,36.832804],[-87.664583,36.833457],[-87.665035,36.833914],[-87.666882,36.835876],[-87.66728,36.83629],[-87.669066,36.838175],[-87.669872,36.83901],[-87.670282,36.839425],[-87.6707,36.839826],[-87.671342,36.840409],[-87.671773,36.840763],[-87.672163,36.841099],[-87.672773,36.841577],[-87.673207,36.841911],[-87.673456,36.842108],[-87.673859,36.842399],[-87.674279,36.842693],[-87.674718,36.842992],[-87.675414,36.843442],[-87.675655,36.843593],[-87.676153,36.843893],[-87.676669,36.844192],[-87.677201,36.844489],[-87.678007,36.844916],[-87.678845,36.845331],[-87.67971,36.845729],[-87.680292,36.845984],[-87.680586,36.846106],[-87.681178,36.846344],[-87.681778,36.846575],[-87.683899,36.847369],[-87.695248,36.851583],[-87.696399,36.852018],[-87.696682,36.85213],[-87.697242,36.852369],[-87.697796,36.852625],[-87.698021,36.852736],[-87.698606,36.853039],[-87.699132,36.853329],[-87.699642,36.853631],[-87.700137,36.853949],[-87.700624,36.854271],[-87.701081,36.854601],[-87.701525,36.854944],[-87.701954,36.855298],[-87.702165,36.855478],[-87.702574,36.855847],[-87.702756,36.85602],[-87.70297,36.856225],[-87.703347,36.856615],[-87.703709,36.857012],[-87.704054,36.857417],[-87.704381,36.857829],[-87.710946,36.866296],[-87.71205,36.867723],[-87.712254,36.867973],[-87.712557,36.868329],[-87.712753,36.868549],[-87.712922,36.86873],[-87.713309,36.869125],[-87.713506,36.869308],[-87.71391,36.869664],[-87.714331,36.870005],[-87.71455,36.870171],[-87.714776,36.870335],[-87.715248,36.870657],[-87.715493,36.870816],[-87.715744,36.870973],[-87.716,36.871124],[-87.716263,36.871271],[-87.71637,36.871329],[-87.717426,36.871898],[-87.717857,36.872134],[-87.718668,36.872566],[-87.722058,36.874394],[-87.725361,36.876163],[-87.726922,36.877012],[-87.727904,36.877575],[-87.728627,36.878007],[-87.729596,36.878602],[-87.73083,36.879372],[-87.73114,36.879622],[-87.732152,36.880371],[-87.732446,36.880602],[-87.732715,36.880835],[-87.732963,36.881074],[-87.733077,36.881197],[-87.734251,36.882549],[-87.735972,36.881342],[-87.736189,36.881187],[-87.737429,36.880305],[-87.737606,36.880175],[-87.738014,36.879854],[-87.739793,36.878393],[-87.74015,36.878108],[-87.740365,36.877945],[-87.740535,36.877828],[-87.74071,36.877716],[-87.740945,36.877579],[-87.741226,36.877432],[-87.741456,36.877331],[-87.741746,36.877222],[-87.741902,36.877209],[-87.742049,36.87719],[-87.74217,36.87717],[-87.742358,36.877125],[-87.742711,36.877058],[-87.742985,36.877019],[-87.743441,36.876975],[-87.744041,36.87694],[-87.74651,36.87681],[-87.748227,36.876719],[-87.749853,36.876622],[-87.750127,36.876599],[-87.750397,36.876568],[-87.750662,36.876528],[-87.750845,36.876495],[-87.750921,36.876482],[-87.751174,36.876429],[-87.751413,36.876372],[-87.751659,36.876303],[-87.752122,36.876153],[-87.752582,36.875992],
[-87.754012,36.875455],[-87.755189,36.875013],[-87.758304,36.873849],[-87.760949,36.872861],[-87.761391,36.872696],[-87.762185,36.872398],[-87.762691,36.872209],[-87.76338,36.871962],[-87.76361,36.871884],[-87.764536,36.871583],[-87.765183,36.871387],[-87.765346,36.871339],[-87.765713,36.871232],[-87.766199,36.871096],[-87.769649,36.870176],[-87.772919,36.869303],[-87.774016,36.869015],[-87.774439,36.868899],[-87.776555,36.868329],[-87.777285,36.868141],[-87.777773,36.868031],[-87.778018,36.867985],[-87.778341,36.867934],[-87.778515,36.867909],[-87.778757,36.867874],[-87.779006,36.867846],[-87.779261,36.867822],[-87.77952,36.867806],[-87.780054,36.867787],[-87.781169,36.867784],[-87.783472,36.867787],[-87.783615,36.867787],[-87.78805,36.867791],[-87.791378,36.867795],[-87.791782,36.867795],[-87.792342,36.867785],[-87.792743,36.867764],[-87.792974,36.867742],[-87.793203,36.867711],[-87.793426,36.867672],[-87.793646,36.867626],[-87.793863,36.867571],[-87.794075,36.867508],[-87.794284,36.867437],[-87.794487,36.867359],[-87.794686,36.867276],[-87.79488,36.867186],[-87.795068,36.867089],[-87.795249,36.866987],[-87.795424,36.866879],[-87.795491,36.866832],[-87.795589,36.866754],[-87.795743,36.866642],[-87.795892,36.866517],[-87.796037,36.866388],[-87.79615,36.866279],[-87.796311,36.866113],[-87.79644,36.865966],[-87.79656,36.865812],[-87.796671,36.865651],[-87.796774,36.865487],[-87.796872,36.865318],[-87.797052,36.864971],[-87.79758,36.863863],[-87.797645,36.863727],[-87.797726,36.863567],[-87.79787,36.863299],[-87.797981,36.863117],[-87.798156,36.862863],[-87.798302,36.862676],[-87.798478,36.862481],[-87.798687,36.862271],[-87.798798,36.862103],[-87.798942,36.861991],[-87.799136,36.861853],[-87.799399,36.861687],[-87.799562,36.861598],[-87.799927,36.861421],[-87.800206,36.861303],[-87.80048,36.861204],[-87.800832,36.861102],[-87.801107,36.861035],[-87.801314,36.860991],[-87.801666,36.86093],[-87.802042,36.86088],[-87.807987,36.860225],[-87.808514,36.860163],[-87.808744,36.860131],[-87.809148,36.860066],[-87.809394,36.860016],[-87.809639,36.859959],[-87.809882,36.859895],[-87.810121,36.859824],[-87.810472,36.859703],[-87.810815,36.859563],[-87.810893,36.859528],[-87.811044,36.85946],[-87.811275,36.859347],[-87.811504,36.859223],[-87.81173,36.859087],[-87.811949,36.858945],[-87.812162,36.858794],[-87.812366,36.858636],[-87.81256,36.858472],[-87.812834,36.858215],[-87.812921,36.858128],[-87.813087,36.857948],[-87.813473,36.857491],[-87.817491,36.852555],[-87.817627,36.852388],[-87.817752,36.852234],[-87.822362,36.846571],[-87.82272,36.846135],[-87.823092,36.845699],[-87.823322,36.845451],[-87.82348,36.845294],[-87.82364,36.845143],[-87.823976,36.844849],[-87.824154,36.844704],[-87.824339,36.844561],[-87.824721,36.844289],[-87.825013,36.8441],[-87.825203,36.843986],[-87.825508,36.843814],[-87.825814,36.843658],[-87.826134,36.843505],[-87.826357,36.843404],[-87.826588,36.843306],[-87.826828,36.843213],[-87.827073,36.843124],[-87.827506,36.842986],[-87.827574,36.842964],[-87.827829,36.842891],[-87.828089,36.842823],[-87.828617,36.842704],[-87.829289,36.842571],[-87.832135,36.842026],[-87.833362,36.841795],[-87.834507,36.841561],[-87.834759,36.841505],[-87.835136,36.841413],[-87.835514,36.841314],[-87.835894,36.841208],[-87.836274,36.841095],[-87.836654,36.840974],[-87.837158,36.840804],[-87.840129,36.839772],[-87.840596,36.839614],[-87.840949,36.839503],[-87.841425,36.839367],[-87.841784,36.839275],[-87.842143,36.839191],[-87.842384,36.839141],[-87.842962,36.839029],[-87.844931,36.838664],[-87.845825
,36.838505],[-87.846173,36.838455],[-87.846527,36.838412],[-87.846887,36.83838],[-87.847375,36.838355],[-87.847866,36.838347],[-87.848231,36.838356],[-87.848477,36.838368],[-87.848724,36.838386],[-87.849219,36.838429],[-87.851253,36.838634],[-87.852337,36.838743],[-87.852955,36.838819],[-87.853896,36.838962],[-87.855539,36.839234],[-87.856012,36.839307],[-87.856366,36.839357],[-87.856602,36.839384],[-87.856838,36.839406],[-87.857073,36.839422],[-87.857304,36.839432],[-87.857644,36.839431],[-87.857974,36.839416],[-87.858293,36.839386],[-87.858712,36.839329],[-87.85913,36.839254],[-87.859336,36.839208],[-87.859541,36.839156],[-87.860218,36.838953],[-87.861085,36.838675],[-87.861624,36.838492],[-87.861964,36.838361],[-87.862284,36.838221],[-87.862586,36.838074],[-87.862735,36.837997],[-87.863032,36.837825],[-87.863329,36.837631],[-87.863637,36.837406],[-87.863873,36.837208],[-87.864103,36.836995],[-87.864247,36.836848],[-87.864351,36.836734],[-87.864382,36.8367],[-87.864572,36.836472],[-87.864687,36.836321],[-87.864752,36.836236],[-87.865377,36.835389],[-87.866317,36.8341],[-87.8672,36.832899],[-87.867726,36.83216],[-87.868326,36.831307],[-87.868793,36.830618],[-87.871586,36.826467],[-87.8719,36.825999],[-87.872019,36.825815],[-87.872129,36.825628],[-87.872277,36.825342],[-87.872404,36.82505],[-87.87272,36.824167],[-87.874122,36.820139],[-87.874219,36.819877],[-87.874296,36.819706],[-87.874355,36.819589],[-87.874429,36.819459],[-87.874589,36.819225],[-87.874707,36.819078],[-87.874835,36.818936],[-87.874969,36.818803],[-87.875178,36.818622],[-87.875323,36.818509],[-87.875569,36.818328],[-87.876599,36.817587],[-87.877089,36.817235],[-87.877547,36.816901],[-87.878526,36.816201],[-87.879843,36.815248],[-87.880749,36.814597],[-87.881188,36.81429],[-87.881628,36.813999],[-87.882072,36.813727],[-87.882147,36.813683],[-87.882486,36.813483],[-87.88299,36.813204],[-87.885271,36.812017],[-87.885759,36.811775],[-87.886208,36.81158],[-87.886435,36.811491],[-87.886665,36.811408],[-87.88701,36.811294],[-87.887359,36.811194],[-87.887829,36.811084],[-87.888306,36.810995],[-87.888547,36.810957],[-87.888907,36.810912],[-87.889288,36.81088],[-87.889537,36.810867],[-87.889789,36.810859],[-87.890167,36.810859],[-87.890545,36.810872],[-87.890795,36.810888],[-87.891044,36.810912],[-87.892383,36.811061],[-87.892973,36.811116],[-87.893433,36.811147],[-87.893884,36.811153],[-87.896331,36.811115],[-87.89806,36.811091],[-87.898231,36.811082],[-87.898911,36.811021],[-87.899024,36.810999],[-87.899479,36.810866],[-87.899817,36.81074],[-87.900074,36.810628],[-87.900217,36.810556],[-87.900596,36.810382],[-87.900826,36.810271],[-87.90166,36.809835],[-87.902952,36.809192],[-87.903589,36.80886],[-87.904007,36.808623],[-87.904212,36.808498],[-87.904998,36.807998],[-87.905593,36.807632],[-87.906001,36.807409],[-87.906636,36.807093],[-87.907081,36.806893],[-87.907309,36.806798],[-87.907779,36.806616],[-87.908172,36.806482],[-87.908504,36.806377],[-87.909011,36.80623],[-87.909512,36.806104],[-87.910027,36.805995],[-87.910554,36.805899],[-87.911099,36.805814],[-87.912737,36.80559],[-87.913133,36.805534],[-87.915952,36.805136],[-87.916522,36.805052],[-87.917623,36.804891],[-87.918479,36.804784],[-87.918904,36.804743],[-87.919305,36.804735],[-87.919427,36.804732],[-87.919637,36.804736],[-87.91995,36.804754],[-87.920159,36.80477],[-87.920365,36.804794],[-87.920669,36.80484],[-87.920971,36.804897],[-87.921866,36.805101],[-87.923424,36.805457],[-87.923867,36.805545],[-87.924116,36.805588],[-87.924352,36.805616],[-87.924544,36.805627],[-87.
92474,36.805639],[-87.92483,36.805642],[-87.925032,36.805648],[-87.925408,36.805646],[-87.92582,36.805632],[-87.926233,36.805603],[-87.926437,36.805581],[-87.926843,36.805524],[-87.927069,36.805485],[-87.928377,36.805257],[-87.928473,36.80524],[-87.930135,36.804919],[-87.930309,36.804885],[-87.931677,36.804599],[-87.932885,36.804354],[-87.93338,36.804236],[-87.933868,36.804101],[-87.934107,36.804024],[-87.934344,36.803942],[-87.934575,36.803856],[-87.9348,36.803766],[-87.935038,36.803662],[-87.935245,36.803572],[-87.935467,36.803466],[-87.935689,36.803353],[-87.936133,36.803109],[-87.936611,36.802836],[-87.937489,36.802334],[-87.938747,36.801613],[-87.939265,36.801316],[-87.940582,36.800562],[-87.941167,36.800246],[-87.941528,36.800067],[-87.94202,36.799841],[-87.942667,36.799572],[-87.942938,36.799469],[-87.943214,36.79937],[-87.943689,36.799211],[-87.944074,36.799094],[-87.944509,36.798973],[-87.944944,36.798865],[-87.945232,36.7988],[-87.94572,36.798704],[-87.945806,36.798687],[-87.946093,36.798637],[-87.946663,36.798555],[-87.947218,36.798492],[-87.947488,36.798467],[-87.947893,36.798438],[-87.948296,36.798416],[-87.948826,36.798403],[-87.949088,36.798401],[-87.949682,36.798417],[-87.950116,36.798441],[-87.950747,36.798488],[-87.957131,36.799036],[-87.958151,36.79911],[-87.958432,36.799123],[-87.95869,36.799128],[-87.95908,36.799125],[-87.959389,36.799114],[-87.959733,36.799092],[-87.959991,36.799069],[-87.960226,36.799041],[-87.960495,36.79901],[-87.96074,36.798972],[-87.961219,36.798887],[-87.961956,36.798736],[-87.962912,36.798539],[-87.963374,36.798453],[-87.963877,36.798377],[-87.964259,36.798337],[-87.964894,36.798292],[-87.966959,36.798187],[-87.967471,36.798161],[-87.968327,36.798118],[-87.968807,36.798103],[-87.969163,36.798103],[-87.969649,36.79812],[-87.970023,36.798146],[-87.970531,36.798201],[-87.970911,36.798256],[-87.971287,36.798323],[-87.971779,36.798431],[-87.972522,36.798619],[-87.982375,36.80132],[-87.983902,36.801723],[-87.986071,36.802461],[-87.986703,36.80265],[-87.987191,36.802761],[-87.987719,36.802858],[-87.988174,36.802911],[-87.98861,36.802945],[-87.989,36.802957],[-87.989298,36.802954],[-87.989691,36.802934],[-87.989898,36.80292],[-87.99022,36.802885],[-87.990596,36.802828],[-87.990857,36.802777],[-87.991308,36.802667],[-87.991633,36.802572],[-87.992048,36.802436],[-87.992338,36.80232],[-87.992661,36.802181],[-87.992947,36.802107],[-87.99316,36.801991],[-87.993372,36.801866],[-87.993579,36.801736],[-87.993783,36.801599],[-87.994084,36.801385],[-87.995705,36.800197],[-87.99837,36.798183],[-88.000689,36.796489],[-88.002918,36.794862],[-88.004626,36.793616],[-88.005165,36.793225],[-88.005457,36.793013],[-88.005963,36.792652],[-88.00658,36.792237],[-88.007086,36.791918],[-88.007594,36.791614],[-88.008034,36.791355],[-88.008634,36.791023],[-88.009207,36.790724],[-88.009705,36.790479],[-88.010614,36.790061],[-88.012744,36.789166],[-88.014219,36.788544],[-88.015914,36.787858],[-88.017754,36.787155],[-88.018756,36.786781],[-88.019204,36.786624],[-88.019849,36.786425],[-88.020617,36.786225],[-88.021614,36.78599],[-88.022473,36.785794],[-88.022778,36.785727],[-88.022936,36.785709],[-88.023185,36.785686],[-88.02344,36.785669],[-88.023682,36.785669],[-88.024053,36.7857],[-88.024396,36.785736],[-88.024671,36.785798],[-88.024909,36.785865],[-88.025132,36.785933],[-88.025507,36.786071],[-88.026228,36.786371],[-88.02659,36.786519],[-88.026818,36.786605],[-88.02699,36.786661],[-88.027182,36.786715],[-88.027365,36.78676],[-88.027575,36.786799],[-88.027808,36.786831],[-88.0280
03,36.786849],[-88.028233,36.786863],[-88.028484,36.786864],[-88.028761,36.786846],[-88.028947,36.786824],[-88.029192,36.786787],[-88.029429,36.786737],[-88.029607,36.78669],[-88.029818,36.786627],[-88.030206,36.78649],[-88.03067,36.786326],[-88.031297,36.786086],[-88.032119,36.785782],[-88.032656,36.785596],[-88.033219,36.78542],[-88.033728,36.785274],[-88.034021,36.785195],[-88.034331,36.785116],[-88.034591,36.785055],[-88.03492,36.78499],[-88.035224,36.784939],[-88.035816,36.784858],[-88.036299,36.7848],[-88.037572,36.784657],[-88.037956,36.784604],[-88.038249,36.78456],[-88.038495,36.784513],[-88.038665,36.784475],[-88.038962,36.784392],[-88.039296,36.784292],[-88.039967,36.784089],[-88.040684,36.783883],[-88.041007,36.78379],[-88.041361,36.783709],[-88.041587,36.783663],[-88.041805,36.783632],[-88.042187,36.783593],[-88.04246,36.783567],[-88.042905,36.783517],[-88.043236,36.783475],[-88.043679,36.783401],[-88.043886,36.783359],[-88.044296,36.783272],[-88.044627,36.783196],[-88.04503,36.783095],[-88.045551,36.782952],[-88.046918,36.782569],[-88.04769,36.782348],[-88.048514,36.782115],[-88.049111,36.781946],[-88.049613,36.781813],[-88.050138,36.781688],[-88.050547,36.781599],[-88.051126,36.781477],[-88.051455,36.781413],[-88.051757,36.781358],[-88.052422,36.78126],[-88.053133,36.78116],[-88.053758,36.781082],[-88.05505,36.780908],[-88.056077,36.780788],[-88.057001,36.78067],[-88.057803,36.780569],[-88.059832,36.780319],[-88.060676,36.780238],[-88.061233,36.780166],[-88.062356,36.780024],[-88.066408,36.779514],[-88.067035,36.77944],[-88.06813,36.779299],[-88.068931,36.779198],[-88.069741,36.779104],[-88.070478,36.779025],[-88.070679,36.779003],[-88.071108,36.778968],[-88.071717,36.77892],[-88.07249,36.77888],[-88.073489,36.778808],[-88.075079,36.778752],[-88.077368,36.778688],[-88.078174,36.778665],[-88.079189,36.77864],[-88.079995,36.77863],[-88.080244,36.778636],[-88.08045,36.778646],[-88.080629,36.77866],[-88.08079,36.778682],[-88.081576,36.77884],[-88.082937,36.779149],[-88.083848,36.77933],[-88.085015,36.779525],[-88.085556,36.779597],[-88.08605,36.779639],[-88.0863,36.779654],[-88.087052,36.779672],[-88.087919,36.779636],[-88.090252,36.779505],[-88.090947,36.779462],[-88.092095,36.779369],[-88.094047,36.77916],[-88.097018,36.778829],[-88.097291,36.7788],[-88.097771,36.778728],[-88.098401,36.778599],[-88.098898,36.778464],[-88.099435,36.778285],[-88.099874,36.778112],[-88.100836,36.777677],[-88.102744,36.776789],[-88.103864,36.776277],[-88.104595,36.775969],[-88.104994,36.77581],[-88.105401,36.775657],[-88.105883,36.775485],[-88.106569,36.775257],[-88.107293,36.775019],[-88.107715,36.774885],[-88.108329,36.774699],[-88.108708,36.774585],[-88.10918,36.774461],[-88.109629,36.77435],[-88.110113,36.774244],[-88.11075,36.774097],[-88.111544,36.773909],[-88.11222,36.773765],[-88.112728,36.773656],[-88.113348,36.773539],[-88.114535,36.77333],[-88.126158,36.771488],[-88.127318,36.771299],[-88.128549,36.771101],[-88.129699,36.770929],[-88.130487,36.77083],[-88.131092,36.77077],[-88.131752,36.770723],[-88.132256,36.770693],[-88.132755,36.770669],[-88.133132,36.770653],[-88.133315,36.770653],[-88.133428,36.770664],[-88.133509,36.770675],[-88.13361,36.770695],[-88.133748,36.770723],[-88.133969,36.77077],[-88.134125,36.770802],[-88.134266,36.770829],[-88.134347,36.770845],[-88.13446,36.770857],[-88.13457,36.770865],[-88.134651,36.770863],[-88.134736,36.770855],[-88.134872,36.770838],[-88.134965,36.770825],[-88.135197,36.770784],[-88.135493,36.770742],[-88.135677,36.770711],[-88.136079,36.770621],
[-88.13639,36.770551],[-88.136694,36.770464],[-88.136917,36.770396],[-88.137012,36.770494],[-88.137589,36.771109],[-88.138239,36.771803],[-88.138418,36.771995],[-88.138472,36.772052],[-88.138573,36.77216],[-88.138882,36.772485],[-88.139032,36.77265],[-88.139184,36.772809],[-88.139365,36.772988],[-88.139494,36.773118],[-88.139648,36.773277],[-88.139797,36.773438],[-88.140119,36.773745],[-88.140284,36.773892],[-88.140447,36.774035],[-88.140645,36.774201],[-88.140776,36.774307],[-88.140945,36.774438],[-88.141116,36.774566],[-88.141293,36.774691],[-88.141471,36.774813],[-88.141517,36.774843],[-88.141652,36.774932],[-88.141833,36.775047],[-88.142015,36.77516],[-88.142562,36.775496],[-88.142744,36.775606],[-88.143292,36.775945],[-88.143478,36.776059],[-88.143667,36.776172],[-88.143857,36.776283],[-88.144048,36.776391],[-88.144247,36.776494],[-88.144452,36.776591],[-88.144666,36.77668],[-88.144887,36.776762],[-88.145109,36.776832],[-88.145354,36.7769],[-88.145604,36.776959],[-88.145862,36.777007],[-88.146125,36.777044],[-88.146296,36.777062],[-88.14639,36.777072],[-88.146655,36.777093],[-88.146919,36.77711],[-88.147186,36.777125],[-88.147722,36.77715],[-88.148515,36.777191],[-88.148772,36.777202],[-88.149027,36.77721],[-88.149281,36.777214],[-88.149532,36.777212],[-88.149786,36.777203],[-88.150042,36.777188],[-88.150303,36.777166],[-88.150566,36.777138],[-88.150829,36.777107],[-88.151358,36.77704],[-88.151621,36.777005],[-88.152414,36.776894],[-88.152677,36.776859],[-88.152937,36.776827],[-88.153195,36.776797],[-88.15345,36.776769],[-88.153703,36.776744],[-88.15395,36.776723],[-88.154195,36.776705],[-88.154443,36.776692],[-88.154695,36.776685],[-88.154949,36.776684],[-88.155468,36.776685],[-88.155989,36.776688],[-88.156763,36.776684],[-88.157021,36.776684],[-88.157277,36.776686],[-88.157532,36.776689],[-88.157782,36.776697],[-88.158026,36.776713],[-88.158263,36.77674],[-88.158492,36.77678],[-88.158643,36.776819],[-88.158713,36.776836],[-88.158928,36.776908],[-88.15914,36.77699],[-88.159348,36.777083],[-88.159565,36.777196],[-88.15995,36.777427],[-88.161417,36.778374],[-88.162315,36.778957],[-88.163285,36.779548],[-88.164309,36.780126],[-88.164835,36.780404],[-88.165362,36.780662],[-88.166157,36.781031],[-88.166962,36.781382],[-88.167502,36.781605],[-88.168352,36.781928],[-88.168847,36.782107],[-88.169637,36.782379],[-88.170678,36.782702],[-88.171738,36.782996],[-88.172558,36.78319],[-88.173413,36.783377],[-88.173998,36.783491],[-88.177781,36.784136],[-88.178946,36.784346],[-88.179449,36.784447],[-88.1804,36.784656],[-88.180677,36.784728],[-88.181236,36.784887],[-88.181784,36.785071],[-88.18206,36.785171],[-88.18285,36.785511],[-88.183358,36.785754],[-88.183828,36.786],[-88.18431,36.78627],[-88.185234,36.786818],[-88.187477,36.788166],[-88.188797,36.788967],[-88.189986,36.789683],[-88.190652,36.790094],[-88.191087,36.790375],[-88.19151,36.790664],[-88.191913,36.790975],[-88.192296,36.791312],[-88.192673,36.791674],[-88.193108,36.792138],[-88.193394,36.792479],[-88.193558,36.792688],[-88.193858,36.793111],[-88.194252,36.793708],[-88.194801,36.794566],[-88.195569,36.795733],[-88.195593,36.795768],[-88.195723,36.795964],[-88.195851,36.796168],[-88.196371,36.796963],[-88.196505,36.797161],[-88.196631,36.797359],[-88.196764,36.797554],[-88.197024,36.79796],[-88.197161,36.798158],[-88.197293,36.798359],[-88.197421,36.79856],[-88.197553,36.798761],[-88.197687,36.79896],[-88.197819,36.799161],[-88.19788,36.799252],[-88.197952,36.799362],[-88.198279,36.799853],[-88.198357,36.799971],[-88.198491,36.800169],[
-88.198625,36.80038],[-88.19876,36.800581],[-88.198898,36.800795],[-88.19897,36.800905],[-88.199169,36.801207],[-88.199305,36.801411],[-88.199573,36.801817],[-88.199704,36.802022],[-88.199974,36.802435],[-88.200108,36.802642],[-88.200243,36.802846],[-88.200511,36.803255],[-88.200639,36.803459],[-88.200762,36.803665],[-88.20088,36.803873],[-88.201041,36.804176],[-88.201208,36.804502],[-88.201312,36.804713],[-88.201409,36.804925],[-88.201497,36.805139],[-88.201578,36.805355],[-88.201653,36.805572],[-88.201722,36.80579],[-88.201783,36.806004],[-88.201839,36.806214],[-88.201887,36.806407],[-88.201938,36.806654],[-88.201978,36.806869],[-88.202013,36.807085],[-88.202043,36.807302],[-88.202068,36.807522],[-88.202086,36.807743],[-88.202098,36.807965],[-88.202103,36.808188],[-88.202101,36.808411],[-88.202094,36.808637],[-88.202082,36.808863],[-88.202061,36.809091],[-88.202037,36.809318],[-88.202008,36.809546],[-88.201971,36.809773],[-88.201954,36.809871],[-88.201929,36.81],[-88.20188,36.810227],[-88.201844,36.810372],[-88.201825,36.810453],[-88.201764,36.810679],[-88.201693,36.810903],[-88.201618,36.811128],[-88.201536,36.811358],[-88.201448,36.81158],[-88.201351,36.811801],[-88.201258,36.812036],[-88.201149,36.812246],[-88.201033,36.812458],[-88.200914,36.812673],[-88.200791,36.812884],[-88.200661,36.813094],[-88.200533,36.813304],[-88.200403,36.813515],[-88.200143,36.813935],[-88.199789,36.814513],[-88.19976,36.814563],[-88.199503,36.814983],[-88.199238,36.815414],[-88.198824,36.816079],[-88.19877,36.81617],[-88.198688,36.816301],[-88.198553,36.816524],[-88.198295,36.816969],[-88.198174,36.817189],[-88.197947,36.817633],[-88.197846,36.817854],[-88.197736,36.818078],[-88.197644,36.818307],[-88.197586,36.81854],[-88.197517,36.81877],[-88.197451,36.819002],[-88.19739,36.819233],[-88.197335,36.819466],[-88.197287,36.8197],[-88.197251,36.819941],[-88.19722,36.820182],[-88.197194,36.820422],[-88.197184,36.820568],[-88.197176,36.82068],[-88.197165,36.820905],[-88.197157,36.821145],[-88.197154,36.821303],[-88.197153,36.821383],[-88.197157,36.82162],[-88.197168,36.821853],[-88.19719,36.822083],[-88.197216,36.822311],[-88.197248,36.82254],[-88.197288,36.822768],[-88.197335,36.822994],[-88.197387,36.823218],[-88.197443,36.823441],[-88.197488,36.823602],[-88.197558,36.823839],[-88.197647,36.82411],[-88.197726,36.824332],[-88.197807,36.824549],[-88.197844,36.824645],[-88.197894,36.824775],[-88.197979,36.825],[-88.198055,36.825197],[-88.198237,36.825683],[-88.198267,36.825758],[-88.198325,36.825902],[-88.198415,36.826131],[-88.198503,36.826369],[-88.198592,36.826601],[-88.198681,36.826827],[-88.19887,36.827269],[-88.198952,36.827496],[-88.199045,36.827709],[-88.199146,36.827936],[-88.199249,36.828157],[-88.199364,36.828364],[-88.19947,36.828595],[-88.199608,36.828803],[-88.199748,36.829022],[-88.19989,36.829237],[-88.20004,36.82945],[-88.200195,36.829659],[-88.200355,36.829864],[-88.200486,36.830026],[-88.200684,36.830267],[-88.200831,36.830441],[-88.201021,36.83066],[-88.201113,36.830766],[-88.20153,36.831246],[-88.201869,36.83163],[-88.202033,36.831822],[-88.202191,36.832018],[-88.202347,36.832216],[-88.202509,36.832436],[-88.20264,36.832618],[-88.202776,36.832823],[-88.202908,36.833031],[-88.203031,36.833241],[-88.203145,36.833456],[-88.203255,36.833667],[-88.203349,36.833895],[-88.203438,36.834115],[-88.203517,36.834334],[-88.20355,36.834438],[-88.203588,36.834552],[-88.203719,36.834994],[-88.203847,36.835443],[-88.203915,36.835671],[-88.203982,36.8359],[-88.204051,36.836127],[-88.204192,36.836575],[-88.204
27,36.836795],[-88.204294,36.836853],[-88.204359,36.837011],[-88.204462,36.837223],[-88.204584,36.837428],[-88.204721,36.837629],[-88.204873,36.837824],[-88.205036,36.838014],[-88.205205,36.838201],[-88.205378,36.838387],[-88.205553,36.838571],[-88.205732,36.838753],[-88.205904,36.838935],[-88.206247,36.839293],[-88.206419,36.83947],[-88.206593,36.839647],[-88.206768,36.839827],[-88.206936,36.840011],[-88.207124,36.840194],[-88.207303,36.840381],[-88.207667,36.840755],[-88.208205,36.841315],[-88.208382,36.841503],[-88.208557,36.841691],[-88.208733,36.841876],[-88.20891,36.84206],[-88.209257,36.842426],[-88.20943,36.842607],[-88.209606,36.842789],[-88.20978,36.842971],[-88.209958,36.843154],[-88.210137,36.843334],[-88.210315,36.843524],[-88.210492,36.843707],[-88.210666,36.843889],[-88.211012,36.844245],[-88.211183,36.844418],[-88.211363,36.844581],[-88.211433,36.844638],[-88.211555,36.844733],[-88.211757,36.844873],[-88.21197,36.845],[-88.212196,36.845114],[-88.212437,36.845212],[-88.212688,36.845294],[-88.212784,36.845317],[-88.212948,36.845357],[-88.213217,36.845401],[-88.213489,36.84543],[-88.213764,36.845449],[-88.214039,36.845462],[-88.214315,36.845472],[-88.214692,36.845484],[-88.215725,36.845519],[-88.21601,36.845528],[-88.216573,36.845549],[-88.217044,36.845561],[-88.217685,36.84557],[-88.218201,36.84557],[-88.21852,36.845571],[-88.218794,36.84557],[-88.219601,36.84557],[-88.220345,36.845568],[-88.221096,36.845564],[-88.221586,36.845563],[-88.221988,36.845561],[-88.224109,36.845551],[-88.225548,36.845549],[-88.225928,36.84555],[-88.226457,36.845549],[-88.227517,36.845542],[-88.227773,36.845542],[-88.228521,36.84554],[-88.228766,36.845538],[-88.229482,36.845538],[-88.23077,36.845534],[-88.230958,36.845535],[-88.231137,36.845538],[-88.231657,36.845534],[-88.232037,36.845532],[-88.232479,36.84553],[-88.232652,36.845531],[-88.233207,36.84553],[-88.233573,36.845528],[-88.234463,36.845528],[-88.235386,36.845523],[-88.23562,36.845523],[-88.236136,36.845519],[-88.23695,36.845527],[-88.237221,36.845532],[-88.237495,36.845547],[-88.237766,36.845575],[-88.237904,36.8456],[-88.238029,36.845621],[-88.238284,36.845682],[-88.238534,36.845759],[-88.238772,36.84585],[-88.239001,36.845957],[-88.239218,36.846076],[-88.239352,36.846162],[-88.239422,36.846208],[-88.23962,36.846353],[-88.240005,36.846654],[-88.240869,36.847346],[-88.244726,36.850508],[-88.244942,36.850656],[-88.245168,36.850795],[-88.245391,36.850921],[-88.245641,36.851045],[-88.245889,36.851152],[-88.246143,36.851247],[-88.246399,36.851331],[-88.250924,36.852685],[-88.251202,36.852756],[-88.25132,36.85278],[-88.251484,36.852814],[-88.251781,36.852845],[-88.252065,36.852865],[-88.252355,36.852873],[-88.25244,36.852872],[-88.252642,36.852868],[-88.252927,36.852854],[-88.255768,36.852656],[-88.25606,36.85264],[-88.256351,36.852627],[-88.256642,36.852619],[-88.256928,36.852616],[-88.257212,36.852617],[-88.257494,36.852624],[-88.257725,36.852635],[-88.258047,36.852662],[-88.258315,36.852698],[-88.258582,36.852746],[-88.258839,36.852802],[-88.259091,36.852867],[-88.259335,36.852941],[-88.259403,36.852965],[-88.259573,36.853026],[-88.259806,36.853122],[-88.260032,36.853229],[-88.260255,36.853348],[-88.260472,36.853478],[-88.260626,36.85358],[-88.26089,36.853769],[-88.261085,36.853932],[-88.261268,36.854106],[-88.261442,36.854292],[-88.261603,36.854487],[-88.269592,36.866142],[-88.273261,36.871495],[-88.275566,36.874857],[-88.275655,36.874988],[-88.275921,36.875384],[-88.276603,36.876379],[-88.27674,36.876581],[-88.276879,36.876782],[-88.277017
,36.876985],[-88.277159,36.877188],[-88.27744,36.877598],[-88.277596,36.877823],[-88.277866,36.878218],[-88.278147,36.878633],[-88.278179,36.878679],[-88.278429,36.87905],[-88.278474,36.879114],[-88.278572,36.879259],[-88.278716,36.879468],[-88.279007,36.879885],[-88.279155,36.88009],[-88.279305,36.88029],[-88.279458,36.880488],[-88.279612,36.880683],[-88.279769,36.880876],[-88.279907,36.881039],[-88.280093,36.881256],[-88.280261,36.881442],[-88.280433,36.881626],[-88.280609,36.881807],[-88.280787,36.881986],[-88.280965,36.882161],[-88.281148,36.882334],[-88.281332,36.882504],[-88.28152,36.88267],[-88.281709,36.882834],[-88.2819,36.882995],[-88.282092,36.883152],[-88.282287,36.883308],[-88.282486,36.883461],[-88.282688,36.883615],[-88.282895,36.88377],[-88.283113,36.883926],[-88.283157,36.883956],[-88.283336,36.88408],[-88.283568,36.884237],[-88.283803,36.884392],[-88.284044,36.884544],[-88.284286,36.884691],[-88.28436,36.884734],[-88.28453,36.884834],[-88.284775,36.884974],[-88.28502,36.88511],[-88.285267,36.885242],[-88.285515,36.88537],[-88.285766,36.885494],[-88.286016,36.885615],[-88.286266,36.885731],[-88.286772,36.885959],[-88.287051,36.886083],[-88.287536,36.8863],[-88.287727,36.886384],[-88.288804,36.886865],[-88.289303,36.887086],[-88.290827,36.887757],[-88.292356,36.888441],[-88.292615,36.888556],[-88.292871,36.888671],[-88.293124,36.888782],[-88.293371,36.888892],[-88.293845,36.8891],[-88.294069,36.8892],[-88.294486,36.889384],[-88.294681,36.889469],[-88.294859,36.88955],[-88.295027,36.889624],[-88.295098,36.889657],[-88.295048,36.889724],[-88.294983,36.889822],[-88.294873,36.889983],[-88.294803,36.890084],[-88.294543,36.890465],[-88.294336,36.890761],[-88.294226,36.89092],[-88.293768,36.891593],[-88.293408,36.892118],[-88.293285,36.892296],[-88.293041,36.892657],[-88.292917,36.892839],[-88.29279,36.893021],[-88.292666,36.893203],[-88.292544,36.893385],[-88.292421,36.893565],[-88.292176,36.893916],[-88.292056,36.894091],[-88.291812,36.894443],[-88.291694,36.894618],[-88.291575,36.894791],[-88.291348,36.895132],[-88.291231,36.895305],[-88.291109,36.89548],[-88.290987,36.895658],[-88.29086,36.895838],[-88.290728,36.896012],[-88.2906,36.896196],[-88.290466,36.89637],[-88.290328,36.896541],[-88.290185,36.896707],[-88.290035,36.896865],[-88.289709,36.897162],[-88.289534,36.897301],[-88.289352,36.897429],[-88.289162,36.897548],[-88.288965,36.897658],[-88.288762,36.89776],[-88.288556,36.897859],[-88.288345,36.897954],[-88.288131,36.898048],[-88.2877,36.898233],[-88.287483,36.898319],[-88.28737,36.898367],[-88.287268,36.89841],[-88.287052,36.898507],[-88.286375,36.898788],[-88.286142,36.898882],[-88.285912,36.898977],[-88.285227,36.899265],[-88.284766,36.899461],[-88.284533,36.899561],[-88.284063,36.899761],[-88.28383,36.899861],[-88.283362,36.90006],[-88.28313,36.90016],[-88.282897,36.900259],[-88.282433,36.900454],[-88.281488,36.900856],[-88.281021,36.901051],[-88.280794,36.901147],[-88.280571,36.901245],[-88.280359,36.901347],[-88.280161,36.901455],[-88.279976,36.901569],[-88.279785,36.901697],[-88.279622,36.901815],[-88.279453,36.901949],[-88.279289,36.902089],[-88.279134,36.902235],[-88.278987,36.902386],[-88.278849,36.902539],[-88.278773,36.902628],[-88.278718,36.902694],[-88.278592,36.902852],[-88.278212,36.903341],[-88.277977,36.903637],[-88.277945,36.903675],[-88.277677,36.904011],[-88.277544,36.90418],[-88.277406,36.904348],[-88.277342,36.904424],[-88.27713,36.904684],[-88.276867,36.905022],[-88.276603,36.905355],[-88.276468,36.905516],[-88.27633,36.905671],[-88.276188,36.9058
18],[-88.27604,36.905955],[-88.275885,36.906084],[-88.275727,36.906199],[-88.275573,36.9063],[-88.275424,36.906394],[-88.275283,36.90649],[-88.275138,36.906577],[-88.274987,36.906656],[-88.274827,36.906734],[-88.274657,36.906812],[-88.274437,36.906908],[-88.274285,36.906973],[-88.274083,36.907052],[-88.273871,36.907118],[-88.273666,36.907219],[-88.273455,36.907317],[-88.273238,36.907398],[-88.27302,36.907485],[-88.272807,36.907574],[-88.272594,36.907659],[-88.27239,36.907746],[-88.272191,36.907837],[-88.272008,36.907935],[-88.271826,36.908046],[-88.271661,36.908167],[-88.271511,36.908304],[-88.27138,36.908454],[-88.271267,36.908616],[-88.271172,36.908787],[-88.271097,36.908965],[-88.27104,36.90915],[-88.270997,36.909336],[-88.27096,36.909522],[-88.270928,36.909707],[-88.270894,36.909894],[-88.27082,36.910264],[-88.270753,36.910623],[-88.270718,36.910801],[-88.270685,36.910977],[-88.270651,36.911152],[-88.270587,36.911511],[-88.270553,36.911693],[-88.270507,36.911873],[-88.270439,36.912044],[-88.270346,36.912205],[-88.27023,36.912354],[-88.270162,36.912423],[-88.270094,36.91249],[-88.269946,36.912613],[-88.269787,36.912724],[-88.26962,36.912827],[-88.269263,36.913029],[-88.269074,36.913129],[-88.268893,36.913242],[-88.268818,36.91327],[-88.268703,36.913314],[-88.268501,36.913387],[-88.268296,36.91344],[-88.268088,36.913468],[-88.267871,36.91348],[-88.267652,36.913473],[-88.267429,36.913456],[-88.266972,36.913412],[-88.266743,36.913392],[-88.266515,36.913383],[-88.266296,36.91339],[-88.266081,36.913418],[-88.265872,36.913466],[-88.265671,36.913537],[-88.265477,36.913621],[-88.265312,36.913739],[-88.265162,36.913867],[-88.265022,36.914012],[-88.264885,36.914174],[-88.264744,36.914347],[-88.264602,36.914524],[-88.264461,36.914702],[-88.264321,36.914885],[-88.264182,36.915069],[-88.264039,36.915253],[-88.263898,36.915437],[-88.263754,36.91562],[-88.263617,36.915801],[-88.263464,36.915963],[-88.263331,36.916131],[-88.263187,36.916282],[-88.263019,36.916413],[-88.262827,36.916523],[-88.262617,36.916614],[-88.262392,36.91668],[-88.262157,36.916743],[-88.261916,36.916789],[-88.261669,36.916831],[-88.261413,36.91688],[-88.261162,36.916923],[-88.260908,36.916969],[-88.260652,36.917023],[-88.260407,36.91708],[-88.260168,36.917148],[-88.259937,36.917225],[-88.259715,36.917312],[-88.259506,36.917412],[-88.259309,36.917529],[-88.25913,36.917663],[-88.258963,36.917807],[-88.258809,36.91795],[-88.258664,36.918112],[-88.258537,36.918277],[-88.258422,36.918447],[-88.258321,36.918624],[-88.258231,36.918807],[-88.258153,36.918991],[-88.258082,36.91918],[-88.258013,36.91937],[-88.257926,36.919559],[-88.257903,36.9196],[-88.257821,36.919741],[-88.257701,36.919917],[-88.257567,36.920083],[-88.257417,36.920241],[-88.257255,36.920389],[-88.257078,36.920531],[-88.256884,36.920662],[-88.256679,36.920778],[-88.25646,36.920882],[-88.256233,36.920972],[-88.255997,36.921045],[-88.255756,36.921102],[-88.25551,36.921146],[-88.255367,36.921164],[-88.255261,36.921177],[-88.25474,36.921231],[-88.25421,36.921283],[-88.253946,36.921306],[-88.253682,36.921326],[-88.253412,36.921336],[-88.253144,36.92135],[-88.252942,36.921356],[-88.252869,36.921358],[-88.252595,36.921369],[-88.252327,36.921372],[-88.2518,36.921383],[-88.251329,36.921387],[-88.251042,36.921388],[-88.250301,36.921394],[-88.249542,36.921406],[-88.249287,36.921407],[-88.249028,36.92141],[-88.248768,36.921418],[-88.24851,36.92142],[-88.248256,36.921417],[-88.247995,36.921424],[-88.247696,36.921424],[-88.247438,36.92142],[-88.247417,36.921502],[-88.247405,36.92156],[-8
8.247393,36.921627],[-88.24734,36.921964],[-88.247318,36.922141]]},"steps":[{"maneuver":{"type":"depart","location":{"type":"Point","coordinates":[-87.337733,36.539006]},"instruction":"Head southwest on US 79"},"distance":51,"duration":11,"way_name":"US 79","direction":"SW","heading":230,"mode":"driving"},{"maneuver":{"type":"u-turn","location":{"type":"Point","coordinates":[-87.338282,36.53888]},"instruction":"Make a U-turn onto Kraft Street (US 79)"},"distance":66,"duration":5,"way_name":"Kraft Street (US 79)","direction":"E","heading":106,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-87.337557,36.53874]},"instruction":"Turn left onto Wilma Rudolph Boulevard (US 79;SR 48)"},"distance":3598,"duration":208,"way_name":"Wilma Rudolph Boulevard (US 79;SR 48)","direction":"N","heading":9,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.31255,36.563005]},"instruction":"Continue on Wilma Rudolph Boulevard (US 79)"},"distance":5166,"duration":272,"way_name":"Wilma Rudolph Boulevard (US 79)","direction":"NE","heading":48,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-87.281068,36.601171]},"instruction":"Turn left"},"distance":12,"duration":3,"way_name":"","direction":"NW","heading":307,"mode":"driving"},{"maneuver":{"type":"bear right","location":{"type":"Point","coordinates":[-87.281183,36.601239]},"instruction":"Bear right"},"distance":47,"duration":4,"way_name":"","direction":"N","heading":355,"mode":"driving"},{"maneuver":{"type":"bear left","location":{"type":"Point","coordinates":[-87.281408,36.601615]},"instruction":"Bear left"},"distance":445,"duration":34,"way_name":"","direction":"NW","heading":295,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.285551,36.603764]},"instruction":"Continue on I 24"},"distance":51129,"duration":1820,"way_name":"I 24","direction":"NW","heading":325,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.73083,36.879372]},"instruction":"Continue"},"distance":468,"duration":37,"way_name":"","direction":"NW","heading":317,"mode":"driving"},{"maneuver":{"type":"turn left","location":{"type":"Point","coordinates":[-87.734251,36.882549]},"instruction":"Turn left onto Hopkinsville Road (US 68;KY 80)"},"distance":5978,"duration":273,"way_name":"Hopkinsville Road (US 68;KY 80)","direction":"SW","heading":229,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.79615,36.866279]},"instruction":"Continue on US 68;KY 80"},"distance":7059,"duration":321,"way_name":"US 68;KY 80","direction":"SW","heading":217,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-87.860218,36.838953]},"instruction":"Continue on Canton Road (US 68;KY 80)"},"distance":20327,"duration":927,"way_name":"Canton Road (US 68;KY 80)","direction":"W","heading":249,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.056077,36.780788]},"instruction":"Continue on Hopkinsville Road (US 68;KY 80)"},"distance":2204,"duration":101,"way_name":"Hopkinsville Road (US 68;KY 80)","direction":"W","heading":261,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.080629,36.77866]},"instruction":"Continue on Canton Road (US 68;KY 80)"},"distance":3164,"duration":144,"way_name":"Canton Road (US 68;KY 
80)","direction":"W","heading":278,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.114535,36.77333]},"instruction":"Continue on Eggners Ferry Bridge (US 68;KY 80)"},"distance":1056,"duration":48,"way_name":"Eggners Ferry Bridge (US 68;KY 80)","direction":"W","heading":259,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.126158,36.771488]},"instruction":"Continue on US 68;KY 80"},"distance":976,"duration":45,"way_name":"US 68;KY 80","direction":"W","heading":259,"mode":"driving"},{"maneuver":{"type":"turn right","location":{"type":"Point","coordinates":[-88.136917,36.770396]},"instruction":"Turn right onto US 68"},"distance":6281,"duration":359,"way_name":"US 68","direction":"NW","heading":324,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.195569,36.795733]},"instruction":"Continue on Jefferson Davis Highway (US 68)"},"distance":443,"duration":25,"way_name":"Jefferson Davis Highway (US 68)","direction":"NW","heading":328,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.19788,36.799252]},"instruction":"Continue on US 68"},"distance":75,"duration":4,"way_name":"US 68","direction":"NW","heading":333,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.198279,36.799853]},"instruction":"Continue on Jefferson Davis Highway (US 68)"},"distance":755,"duration":43,"way_name":"Jefferson Davis Highway (US 68)","direction":"NW","heading":334,"mode":"driving"},{"maneuver":{"type":"continue","location":{"type":"Point","coordinates":[-88.201783,36.806004]},"instruction":"Continue on US 68"},"distance":14619,"duration":836,"way_name":"US 68","direction":"N","heading":0,"mode":"driving"},{"maneuver":{"type":"turn right","location":{"type":"Point","coordinates":[-88.295098,36.889657]},"instruction":"Turn right onto Moors Camp Highway"},"distance":6076,"duration":503,"way_name":"Moors Camp Highway","direction":"NE","heading":28,"mode":"driving"},{"maneuver":{"type":"arrive","location":{"type":"Point","coordinates":[-88.247318,36.922141]},"instruction":"You have arrived at your destination"}}]}]}
\ No newline at end of file
+{"waypoints":[{"location":[-87.337738,36.539007],"name":""},{"location":[-88.247315,36.922142],"name":"Moors Camp Highway"}],"routes":[{"legs":[{"steps":[],"weight":14724.1,"distance":147882.2,"summary":"","duration":7390.2}],"weight_name":"routability","geometry":"yo_~EzcatOpF|b@~Do^cx@oTukCuxDakEsuCctCppCqrDfuJwzCplEenErsN{lCzkB_yJhaTssB~mCczAbpEc~AneBwdAl_DkcBpwAs|O`rYweHroUqrJrcNclAhgP`|A~aHaRf}Bnl@phExcBdaFbqCppDvc@lzBltB`tBbCh}ArsAgXjsDbZraBmk@za@rX~iDacD`gA}_DcoCe}C{y@mkC","weight":14724.1,"distance":147882.2,"duration":7390.2}],"code":"Ok"}
\ No newline at end of file
diff --git a/tests/moors_geojson.json b/tests/moors_geojson.json
new file mode 100644
index 0000000..0bdfcfe
--- /dev/null
+++ b/tests/moors_geojson.json
@@ -0,0 +1,1 @@
+{"waypoints":[{"location":[-87.337738,36.539007],"name":""},{"location":[-88.247315,36.922142],"name":"Moors Camp Highway"}],"routes":[{"legs":[{"steps":[],"weight":14724.1,"distance":147882.2,"summary":"","duration":7390.2}],"weight_name":"routability","geometry":{"coordinates":[[-87.337738,36.539007],[-87.343494,36.537804],[-87.338454,36.536835],[-87.335013,36.545984],[-87.305297,36.568487],[-87.281198,36.601138],[-87.304489,36.625003],[-87.36437,36.653726],[-87.397258,36.678645],[-87.477364,36.711803],[-87.494782,36.734499],[-87.602665,36.794983],[-87.625554,36.813639],[-87.659008,36.828215],[-87.675414,36.843442],[-87.701081,36.854601],[-87.715248,36.870657],[-87.851418,36.957318],[-87.966719,37.004484],[-88.044255,37.063927],[-88.132632,37.076274],[-88.179185,37.061378],[-88.199394,37.064426],[-88.231642,37.057149],[-88.267833,37.041023],[-88.296239,37.017644],[-88.315987,37.011757],[-88.334724,36.992973],[-88.349808,36.992313],[-88.345772,36.978769],[-88.350114,36.949914],[-88.343002,36.934132],[-88.347097,36.928548],[-88.320848,36.901186],[-88.295098,36.889657],[-88.269787,36.912724],[-88.247315,36.922142]],"type":"LineString"},"weight":14724.1,"distance":147882.2,"duration":7390.2}],"code":"Ok"}
\ No newline at end of file
diff --git a/tests/test_directions.py b/tests/test_directions.py
index 2628d26..d9c4fe4 100644
--- a/tests/test_directions.py
+++ b/tests/test_directions.py
@@ -25,7 +25,7 @@ def test_class_attrs():
"""Get expected class attr values"""
serv = mapbox.Directions()
assert serv.api_name == 'directions'
- assert serv.api_version == 'v4'
+ assert serv.api_version == 'v5'
@responses.activate
@@ -36,15 +36,15 @@ def test_directions(cache):
responses.add(
responses.GET,
- 'https://api.mapbox.com/v4/directions/mapbox.driving/-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test',
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/' +
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test',
match_querystring=True,
body=body, status=200,
content_type='application/json')
res = mapbox.Directions(access_token='pk.test', cache=cache).directions(points)
assert res.status_code == 200
- assert sorted(res.json()['routes'][0].keys()) == ['distance', 'duration', 'geometry', 'steps', 'summary']
- assert sorted(res.json().keys()) == ['destination', 'origin', 'routes', 'waypoints']
+ assert 'distance' in res.json()['routes'][0].keys()
@responses.activate
@@ -54,7 +54,8 @@ def test_directions_geojson():
responses.add(
responses.GET,
- 'https://api.mapbox.com/v4/directions/mapbox.driving/-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test',
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test',
match_querystring=True,
body=body, status=200,
content_type='application/json')
@@ -62,23 +63,46 @@ def test_directions_geojson():
res = mapbox.Directions(access_token='pk.test').directions(points)
fc = res.geojson()
assert fc['type'] == 'FeatureCollection'
- assert sorted(fc['features'][0]['properties'].keys()) == ['distance', 'duration', 'summary']
- assert fc['features'][0]['geometry']['type'] == "LineString"
+ assert fc['features'][0]['geometry']['type'] == 'LineString'
+
+
[email protected]
+def test_directions_geojson_as_geojson():
+ with open('tests/moors_geojson.json') as fh:
+ body = fh.read()
+
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test'
+ '&geometries=geojson',
+ match_querystring=True,
+ body=body, status=200,
+ content_type='application/json')
+
+ res = mapbox.Directions(access_token='pk.test').directions(
+ points, geometries='geojson')
+ fc = res.geojson()
+ assert fc['type'] == 'FeatureCollection'
+ assert fc['features'][0]['geometry']['type'] == 'LineString'
def test_invalid_profile():
with pytest.raises(ValueError):
mapbox.Directions(access_token='pk.test').directions(
- None, profile='bogus')
+ points, profile='bogus')
@responses.activate
def test_direction_params():
- params = "&alternatives=false&instructions=html&geometry=polyline&steps=false"
+ params = "&alternatives=false&geometries=polyline&overview=false&steps=false" \
+ "&continue_straight=false&annotations=distance%2Cspeed&language=en" \
+ "&radiuses=10%3Bunlimited"
responses.add(
responses.GET,
- 'https://api.mapbox.com/v4/directions/mapbox.driving/-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test' + params,
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test' + params,
match_querystring=True,
body="not important, only testing URI templating", status=200,
content_type='application/json')
@@ -86,19 +110,149 @@ def test_direction_params():
res = mapbox.Directions(access_token='pk.test').directions(
points,
alternatives=False,
- instructions='html',
- geometry='polyline',
+ geometries='polyline',
+ overview=False,
+ continue_straight=True,
+ annotations=['distance', 'speed'],
+ language='en',
+ waypoint_snapping=[10, 'unlimited'],
steps=False)
assert res.status_code == 200
[email protected]
+def test_direction_backwards_compat():
+ """Ensure old calls to directions method work against v5 API
+ """
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/directions/v5/mapbox/cycling/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test'
+ '&geometries=polyline',
+ match_querystring=True,
+ body="not important, only testing URI templating", status=200,
+ content_type='application/json')
+
+ res = mapbox.Directions(access_token='pk.test').directions(
+ points,
+ geometry='polyline', # plural in v5
+ profile='mapbox.cycling', # '/' delimited in v5
+ )
+ # TODO instructions parameter removed in v5
+ assert res.status_code == 200
+
+
[email protected]
+def test_direction_bearings():
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test'
+ '&radiuses=10%3B20&bearings=270%2C45%3B315%2C90',
+ match_querystring=True,
+ body="not important, only testing URI templating", status=200,
+ content_type='application/json')
+
+ res = mapbox.Directions(access_token='pk.test').directions(
+ points,
+ waypoint_snapping=[(10, 270, 45), (20, 315, 90)])
+ assert res.status_code == 200
+
+
[email protected]
+def test_direction_bearings_none():
+ responses.add(
+ responses.GET,
+ 'https://api.mapbox.com/directions/v5/mapbox/driving/'
+ '-87.337875%2C36.539157%3B-88.247681%2C36.922175.json?access_token=pk.test'
+ '&radiuses=10%3B20&bearings=%3B315%2C90',
+ match_querystring=True,
+ body="not important, only testing URI templating", status=200,
+ content_type='application/json')
+
+ res = mapbox.Directions(access_token='pk.test').directions(
+ points,
+ waypoint_snapping=[10, (20, 315, 90)])
+ assert res.status_code == 200
+
+
def test_invalid_geom_encoding():
service = mapbox.Directions(access_token='pk.test')
with pytest.raises(mapbox.errors.ValidationError):
service._validate_geom_encoding('wkb')
-def test_invalid_instruction_format():
+def test_v4_profile_aliases():
service = mapbox.Directions(access_token='pk.test')
- with pytest.raises(mapbox.errors.ValidationError):
- service._validate_instruction_format('markdown')
+ assert 'mapbox/cycling' == service._validate_profile('mapbox.cycling')
+
+
+def test_invalid_annotations():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError):
+ service._validate_annotations(['awesomeness'])
+
+
+def test_invalid_geom_overview():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError):
+ service._validate_geom_overview('infinite')
+
+
+def test_invalid_radiuses():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError) as e:
+ service._validate_radius('forever')
+ assert 'not a valid radius' in str(e)
+
+
+def test_invalid_number_of_bearings():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError) as e:
+ service._validate_snapping([1, 2, 3], points)
+ assert 'exactly one' in str(e)
+
+
+def test_invalid_bearing_tuple():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError) as e:
+ service._validate_snapping([(270, 45, 'extra'), (315,)], points)
+ assert 'bearing tuple' in str(e)
+
+
+def test_snapping_bearing_none():
+ service = mapbox.Directions(access_token='pk.test')
+ bearings, radii = service._validate_snapping([(10, 270, 45), None], points)
+ assert bearings == [(270, 45), None]
+ assert radii == [10, None]
+
+
+def test_snapping_radii_none():
+ service = mapbox.Directions(access_token='pk.test')
+ bearings, radii = service._validate_snapping([(10, 270, 45), None], points)
+ assert bearings == [(270, 45), None]
+ assert radii == [10, None]
+
+
+def test_validate_radius_none():
+ service = mapbox.Directions(access_token='pk.test')
+ assert service._validate_radius(None) is None
+
+
+def test_validate_radius_invalid():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError) as e:
+ service._validate_radius(-1)
+
+
+def test_invalid_bearing_domain():
+ service = mapbox.Directions(access_token='pk.test')
+ with pytest.raises(mapbox.errors.InvalidParameterError) as e:
+ service._validate_snapping([(-1, 90), (315, 90)], points)
+ assert 'between 0 and 360' in str(e)
+
+
+def test_bearings_without_radius():
+ with pytest.raises(TypeError):
+ mapbox.Directions(access_token='pk.test').directions(
+ waypoint_snapping=[(270, 45), (270, 45)])
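The expected query strings in the tests above (for example `radiuses=10%3B20&bearings=270%2C45%3B315%2C90` and `radiuses=10%3B20&bearings=%3B315%2C90`) pin down how `waypoint_snapping` entries are split into the v5 `radiuses` and `bearings` parameters. The sketch below reconstructs that encoding purely from those expectations; the function name is hypothetical and this is not the SDK's actual implementation.

```python
# Illustrative reconstruction of the waypoint_snapping encoding implied by the
# test expectations above; not taken from mapbox/services/directions.py.

def encode_waypoint_snapping(waypoint_snapping):
    """Split per-waypoint snapping specs into `radiuses` and `bearings` values.

    Each entry may be a bare radius (number or 'unlimited'), a
    (radius, angle, range) tuple, or None. Waypoints are joined with ';',
    bearing pairs with ','.
    """
    radiuses = []
    bearings = []
    for item in waypoint_snapping:
        if item is None:
            radiuses.append('')
            bearings.append('')
        elif isinstance(item, (tuple, list)):
            radius, angle, rng = item
            radiuses.append(str(radius))
            bearings.append('{0},{1}'.format(angle, rng))
        else:
            radiuses.append(str(item))
            bearings.append('')
    radiuses_param = ';'.join(radiuses)
    # Omit bearings entirely when no waypoint specified one, matching
    # test_direction_params above.
    bearings_param = ';'.join(bearings) if any(bearings) else None
    return radiuses_param, bearings_param


if __name__ == '__main__':
    # Mirrors test_direction_bearings_none: bearings encode as ';315,90'.
    print(encode_waypoint_snapping([10, (20, 315, 90)]))   # ('10;20', ';315,90')
    # Mirrors test_direction_params: radiuses '10;unlimited', no bearings.
    print(encode_waypoint_snapping([10, 'unlimited']))      # ('10;unlimited', None)
```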
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": -1,
"issue_text_score": 3,
"test_score": -1
},
"num_modified_files": 4
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest-cov",
"responses",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
boto3==1.23.10
botocore==1.26.10
CacheControl==0.12.14
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
click-plugins==1.1.1
cligj==0.7.2
coverage==6.2
coveralls==3.3.1
distlib==0.3.9
docopt==0.6.2
filelock==3.4.1
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso3166==2.1.1
jmespath==0.10.0
-e git+https://github.com/mapbox/mapbox-sdk-py.git@e95e63b401eca29004aeea72f7a99592ea591215#egg=mapbox
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
polyline==1.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
requests==2.27.1
responses==0.17.0
s3transfer==0.5.2
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
uritemplate==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: mapbox-sdk-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- boto3==1.23.10
- botocore==1.26.10
- cachecontrol==0.12.14
- charset-normalizer==2.0.12
- click==8.0.4
- click-plugins==1.1.1
- cligj==0.7.2
- coverage==6.2
- coveralls==3.3.1
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.4.1
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iso3166==2.1.1
- jmespath==0.10.0
- msgpack==1.0.5
- platformdirs==2.4.0
- polyline==1.4.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- requests==2.27.1
- responses==0.17.0
- s3transfer==0.5.2
- six==1.17.0
- tomli==1.2.3
- tox==3.28.0
- uritemplate==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
prefix: /opt/conda/envs/mapbox-sdk-py
| [
"tests/test_directions.py::test_class_attrs",
"tests/test_directions.py::test_directions[None]",
"tests/test_directions.py::test_directions[cache1]",
"tests/test_directions.py::test_directions_geojson",
"tests/test_directions.py::test_directions_geojson_as_geojson",
"tests/test_directions.py::test_direction_params",
"tests/test_directions.py::test_direction_backwards_compat",
"tests/test_directions.py::test_direction_bearings",
"tests/test_directions.py::test_direction_bearings_none",
"tests/test_directions.py::test_v4_profile_aliases",
"tests/test_directions.py::test_invalid_annotations",
"tests/test_directions.py::test_invalid_geom_overview",
"tests/test_directions.py::test_invalid_radiuses",
"tests/test_directions.py::test_invalid_number_of_bearings",
"tests/test_directions.py::test_invalid_bearing_tuple",
"tests/test_directions.py::test_snapping_bearing_none",
"tests/test_directions.py::test_snapping_radii_none",
"tests/test_directions.py::test_validate_radius_none",
"tests/test_directions.py::test_validate_radius_invalid",
"tests/test_directions.py::test_invalid_bearing_domain"
]
| []
| [
"tests/test_directions.py::test_invalid_profile",
"tests/test_directions.py::test_invalid_geom_encoding",
"tests/test_directions.py::test_bearings_without_radius"
]
| []
| MIT License | 1,458 | [
"CHANGES",
"mapbox/services/directions.py",
"mapbox/encoding.py",
"docs/geocoding.md"
]
| [
"CHANGES",
"mapbox/services/directions.py",
"mapbox/encoding.py",
"docs/geocoding.md"
]
|
|
google__mobly-263 | 067621c8fbb771b35cf0a7c6de6c42a6c452321f | 2017-07-13 23:05:56 | 9bb2ab41518a2f037178888f9e606fc42394ffb0 | dthkao:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/base_test.py, line 359 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0u61nmBZ4dqzUVPiF:-Kp0u61oruK5KEpK27tn:b982v3x) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L359)):*
> ```Python
> except Exception as e:
> logging.exception(e)
> tr_record.add_error('teardown_test', e)
> ```
So this is a deeper issue, but if a test fails in the test case, and is marked as fail, and then teardown throws an error because part of the test wasn't executed, the result now becomes an error. Is that intended?
---
*[mobly/base_test.py, line 378 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0sYZQ6VbNc8OP1KcQ:-Kp0sYZQ6VbNc8OP1KcR:bgoqojn) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L378)):*
> ```Python
> tr_record.test_pass()
> finally:
> if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR,
> ```
Tangential, but is there any need for a distinction between what should be done if an error occurred vs a failure? Do we need a self.on_error()?
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/263)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/base_test.py, line 359 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0u61nmBZ4dqzUVPiF:-Kp0zsfLmcc91mE7p0S_:b-vlp9v4) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L359)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
So this is a deeper issue, but if a test fails in the test case, and is marked as fail, and then teardown throws an error because part of the test wasn't executed, the result now becomes an error. Is that intended?
</blockquote></details>
This has always been the expected behavior and is not changed in this PR.
We specifically did this to be consistent with pyunit.
---
*[mobly/base_test.py, line 378 at r1](https://reviewable.io:443/reviews/google/mobly/263#-Kp0sYZQ6VbNc8OP1KcQ:-Kp1-4WKVsMsfpArDYuc:b7r6m37) ([raw file](https://github.com/google/mobly/blob/581a59e46299149554ab6e9c94da99a3868d0258/mobly/base_test.py#L378)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
Tangential, but is there any need for a distinction between what should be done if an error occurred vs a failure? Do we need a self.on_error()?
</blockquote></details>
We specifically removed `on_error` at one point...
Because we found out that everybody just duped the code between on_fail and on_error with no difference...
---
*Comments from [Reviewable](https://reviewable.io:443/reviews/google/mobly/263)*
<!-- Sent from Reviewable.io -->
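The review thread above debates which hook should run when both the test body and `teardown_test` raise, and whether failures and errors need separate hooks. The fix in the `base_test.py` diff further down resolves this by classifying the record once in the `finally` block and dispatching exactly one procedure function. A condensed, self-contained sketch of that dispatch follows; the record and hook objects here are simplified stand-ins, not Mobly's real classes.

```python
# Simplified illustration of the "classify once, dispatch once" pattern from the
# base_test.py diff below; FakeRecord stands in for mobly's test result record.

class FakeRecord(object):
    PASS, FAIL, ERROR, SKIP = 'PASS', 'FAIL', 'ERROR', 'SKIP'

    def __init__(self, result):
        self.result = result


def dispatch_procedure_hooks(record, on_fail, on_pass, on_skip):
    # The hook is picked from the final result, so on_fail cannot run twice
    # even if both the test body and teardown raised.
    if record.result in (FakeRecord.ERROR, FakeRecord.FAIL):
        on_fail(record)
    elif record.result == FakeRecord.PASS:
        on_pass(record)
    elif record.result == FakeRecord.SKIP:
        on_skip(record)


if __name__ == '__main__':
    calls = []
    dispatch_procedure_hooks(
        FakeRecord(FakeRecord.ERROR),
        on_fail=lambda r: calls.append('on_fail'),
        on_pass=lambda r: calls.append('on_pass'),
        on_skip=lambda r: calls.append('on_skip'))
    assert calls == ['on_fail']  # exactly one hook, invoked exactly once
```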
| diff --git a/mobly/controllers/android_device_lib/callback_handler.py b/mobly/controllers/android_device_lib/callback_handler.py
index 4207f47..1ab67d8 100644
--- a/mobly/controllers/android_device_lib/callback_handler.py
+++ b/mobly/controllers/android_device_lib/callback_handler.py
@@ -83,13 +83,14 @@ class CallbackHandler(object):
(timeout, MAX_TIMEOUT))
timeout *= 1000 # convert to milliseconds for java side
try:
- raw_event = self._event_client.eventWaitAndGet(self._id,
- event_name, timeout)
+ raw_event = self._event_client.eventWaitAndGet(
+ self._id, event_name, timeout)
except Exception as e:
if 'EventSnippetException: timeout.' in str(e):
raise TimeoutError(
- 'Timeout waiting for event "%s" triggered by %s (%s).' %
- (event_name, self._method_name, self._id))
+ 'Timed out after waiting %ss for event "%s" triggered by'
+ ' %s (%s).' % (timeout, event_name, self._method_name,
+ self._id))
raise
return snippet_event.from_dict(raw_event)
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index 3d85e40..f7f473b 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -24,15 +24,27 @@ _INSTRUMENTATION_RUNNER_PACKAGE = (
'com.google.android.mobly.snippet.SnippetRunner')
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
-_LAUNCH_CMD_V0 = ('am instrument -w -e action start -e port %s %s/' +
+_LAUNCH_CMD_V0 = ('%s am instrument -w -e action start -e port %s %s/' +
_INSTRUMENTATION_RUNNER_PACKAGE)
_LAUNCH_CMD_V1 = (
- 'am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+ '%s am instrument -w -e action start %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
_STOP_CMD = (
'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)
+# A test that uses UiAutomation requires the shell session to be maintained
+# while the test is in progress. However, this requirement does not hold for
+# tests that deal with device USB disconnection (once the device disconnects,
+# the shell session that started the instrumentation ends, and UiAutomation
+# fails with the error "UiAutomation not connected"). To keep the shell session
+# alive and redirect stdin/stdout/stderr, use "setsid" or "nohup" when launching
+# the instrumentation test. Because these commands may not be available on
+# every Android system, use them only if they exist.
+_SETSID_COMMAND = 'setsid'
+
+_NOHUP_COMMAND = 'nohup'
+
# Maximum time to wait for a v0 snippet to start on the device (10 minutes).
# TODO(adorokhine): delete this in Mobly 1.6 when snippet v0 support is removed.
_APP_START_WAIT_TIME_V0 = 10 * 60
@@ -60,7 +72,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
def __init__(self, package, adb_proxy, log=logging.getLogger()):
"""Initializes a SnippetClient.
-
+
Args:
package: (str) The package name of the apk where the snippets are
defined.
@@ -77,13 +89,14 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
"""Overrides superclass. Launches a snippet app and connects to it."""
self._check_app_installed()
+ persists_shell_cmd = self._get_persist_command()
# Try launching the app with the v1 protocol. If that fails, fall back
# to v0 for compatibility. Use info here so people know exactly what's
# happening here, which is helpful since they need to create their own
# instrumentations and manifest.
self.log.info('Launching snippet apk %s with protocol v1',
self.package)
- cmd = _LAUNCH_CMD_V1 % self.package
+ cmd = _LAUNCH_CMD_V1 % (persists_shell_cmd, self.package)
start_time = time.time()
self._proc = self._do_start_app(cmd)
@@ -106,7 +119,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Reuse the host port as the device port in v0 snippet. This isn't
# safe in general, but the protocol is deprecated.
self.device_port = self.host_port
- cmd = _LAUNCH_CMD_V0 % (self.device_port, self.package)
+ cmd = _LAUNCH_CMD_V0 % (persists_shell_cmd, self.device_port, self.package)
self._proc = self._do_start_app(cmd)
self._connect_to_v0()
self._launch_version = 'v0'
@@ -291,3 +304,17 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
return line
self.log.debug('Discarded line from instrumentation output: "%s"',
line)
+
+ def _get_persist_command(self):
+ """Check availability and return path of command if available."""
+ for command in [_SETSID_COMMAND, _NOHUP_COMMAND]:
+ try:
+ if command in self._adb.shell('which %s' % command):
+ return command
+ except adb.AdbError:
+ continue
+        self.log.warning('No %s and %s commands available to launch instrument '
+                         'persistently; tests that depend on UiAutomator and '
+                         'at the same time perform USB disconnection may fail',
+ _SETSID_COMMAND, _NOHUP_COMMAND)
+ return ''
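
For readers who want to try the launch-command selection above outside of mobly, here is a minimal standalone sketch. The `run_adb_shell` helper, the `serial`/`package`/`runner` arguments, and the use of `subprocess` are illustrative assumptions only; the real client goes through `self._adb.shell()` and catches `adb.AdbError` rather than `CalledProcessError`.

```python
import subprocess


def run_adb_shell(serial, cmd):
    # Hypothetical stand-in for mobly's AdbProxy.shell(); raises
    # CalledProcessError when the device-side command exits non-zero.
    out = subprocess.check_output(['adb', '-s', serial, 'shell', cmd])
    return out.decode('utf-8', errors='replace')


def get_persist_command(serial):
    # Mirrors _get_persist_command() above: prefer 'setsid', fall back to
    # 'nohup', and return '' when neither command exists on the device.
    for command in ('setsid', 'nohup'):
        try:
            if command in run_adb_shell(serial, 'which %s' % command):
                return command
        except subprocess.CalledProcessError:
            continue
    return ''


def build_v1_launch_cmd(serial, package, runner):
    # Same shape as _LAUNCH_CMD_V1 above; the persist prefix may be empty.
    return '%s am instrument -w -e action start %s/%s' % (
        get_persist_command(serial), package, runner)
```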
| Procedure functions triggered incorrectly
If a test throws an exception and the teardown also throws an exception, `on_fail` is executed twice; it should run exactly once per failing test. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index b3ccf43..7353afd 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -357,14 +357,11 @@ class BaseTestClass(object):
except Exception as e:
logging.exception(e)
tr_record.add_error('teardown_test', e)
- self._exec_procedure_func(self._on_fail, tr_record)
except (signals.TestFailure, AssertionError) as e:
tr_record.test_fail(e)
- self._exec_procedure_func(self._on_fail, tr_record)
except signals.TestSkip as e:
# Test skipped.
tr_record.test_skip(e)
- self._exec_procedure_func(self._on_skip, tr_record)
except (signals.TestAbortClass, signals.TestAbortAll) as e:
# Abort signals, pass along.
tr_record.test_fail(e)
@@ -372,15 +369,19 @@ class BaseTestClass(object):
except signals.TestPass as e:
# Explicit test pass.
tr_record.test_pass(e)
- self._exec_procedure_func(self._on_pass, tr_record)
except Exception as e:
# Exception happened during test.
tr_record.test_error(e)
- self._exec_procedure_func(self._on_fail, tr_record)
else:
tr_record.test_pass()
- self._exec_procedure_func(self._on_pass, tr_record)
finally:
+ if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR,
+ records.TestResultEnums.TEST_RESULT_FAIL):
+ self._exec_procedure_func(self._on_fail, tr_record)
+ elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
+ self._exec_procedure_func(self._on_pass, tr_record)
+ elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
+ self._exec_procedure_func(self._on_skip, tr_record)
self.results.add_record(tr_record)
def _assert_function_name_in_stack(self, expected_func_name):
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index db615dd..35c29f2 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -201,7 +201,12 @@ class BaseTestTest(unittest.TestCase):
on_fail_call_check.assert_called_once_with("haha")
def test_setup_test_fail_by_exception(self):
+ mock_on_fail = mock.Mock()
+
class MockBaseTest(base_test.BaseTestClass):
+ def on_fail(self, *args):
+ mock_on_fail('on_fail')
+
def setup_test(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -211,6 +216,7 @@ class BaseTestTest(unittest.TestCase):
bt_cls = MockBaseTest(self.mock_test_cls_configs)
bt_cls.run(test_names=["test_something"])
+ mock_on_fail.assert_called_once_with('on_fail')
actual_record = bt_cls.results.error[0]
self.assertEqual(actual_record.test_name, self.mock_test_name)
self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
@@ -328,6 +334,9 @@ class BaseTestTest(unittest.TestCase):
def teardown_test(self):
my_mock("teardown_test")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def test_something(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -349,6 +358,9 @@ class BaseTestTest(unittest.TestCase):
def on_fail(self, test_name, begin_time):
my_mock("on_fail")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def teardown_test(self):
raise Exception(MSG_EXPECTED_EXCEPTION)
@@ -373,6 +385,9 @@ class BaseTestTest(unittest.TestCase):
def on_fail(self, test_name, begin_time):
my_mock("on_fail")
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
def test_something(self):
asserts.assert_true(False, MSG_EXPECTED_EXCEPTION)
@@ -387,6 +402,35 @@ class BaseTestTest(unittest.TestCase):
"Requested 1, Skipped 0")
self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+ def test_on_fail_executed_if_both_test_and_teardown_test_fails(self):
+ on_fail_mock = mock.MagicMock()
+
+ class MockBaseTest(base_test.BaseTestClass):
+ def on_fail(self, test_name, begin_time):
+ on_fail_mock("on_fail")
+
+ def on_pass(self, test_name, begin_time):
+ never_call()
+
+ def teardown_test(self):
+ raise Exception(MSG_EXPECTED_EXCEPTION + 'ha')
+
+ def test_something(self):
+ raise Exception(MSG_EXPECTED_EXCEPTION)
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ on_fail_mock.assert_called_once_with("on_fail")
+ actual_record = bt_cls.results.error[0]
+ self.assertEqual(actual_record.test_name, self.mock_test_name)
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertEqual(actual_record.extra_errors,
+ {'teardown_test': 'This is an expected exception.ha'})
+ self.assertIsNone(actual_record.extras)
+ expected_summary = ("Error 1, Executed 1, Failed 0, Passed 0, "
+ "Requested 1, Skipped 0")
+ self.assertEqual(bt_cls.results.summary_str(), expected_summary)
+
def test_on_fail_executed_if_test_setup_fails_by_exception(self):
my_mock = mock.MagicMock()
diff --git a/tests/mobly/controllers/android_device_lib/callback_handler_test.py b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
index a701d51..f288ef3 100755
--- a/tests/mobly/controllers/android_device_lib/callback_handler_test.py
+++ b/tests/mobly/controllers/android_device_lib/callback_handler_test.py
@@ -34,6 +34,7 @@ MOCK_RAW_EVENT = {
class CallbackHandlerTest(unittest.TestCase):
"""Unit tests for mobly.controllers.android_device_lib.callback_handler.
"""
+
def test_timeout_value(self):
self.assertGreaterEqual(jsonrpc_client_base._SOCKET_READ_TIMEOUT,
callback_handler.MAX_TIMEOUT)
@@ -64,9 +65,9 @@ class CallbackHandlerTest(unittest.TestCase):
event_client=mock_event_client,
ret_value=None,
method_name=None)
- expected_msg = 'Timeout waiting for event "ha" .*'
+ expected_msg = 'Timed out after waiting .*s for event "ha" .*'
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitAndGet('ha')
def test_wait_for_event(self):
@@ -101,7 +102,7 @@ class CallbackHandlerTest(unittest.TestCase):
return False
with self.assertRaisesRegex(callback_handler.TimeoutError,
- expected_msg):
+ expected_msg):
handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 010064c..beb9262 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -18,6 +18,7 @@ from builtins import bytes
import mock
from future.tests.base import unittest
+from mobly.controllers.android_device_lib import adb
from mobly.controllers.android_device_lib import jsonrpc_client_base
from mobly.controllers.android_device_lib import snippet_client
from tests.lib import jsonrpc_client_test_base
@@ -51,6 +52,8 @@ class MockAdbProxy(object):
return bytes('instrumentation:{p}/{r} (target={p})'.format(
p=MOCK_PACKAGE_NAME,
r=snippet_client._INSTRUMENTATION_RUNNER_PACKAGE), 'utf-8')
+ elif 'which' in params:
+ return ''
def __getattr__(self, name):
"""All calls to the none-existent functions in adb proxy would
@@ -175,6 +178,73 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):
client.start_app_and_connect()
self.assertEqual(123, client.device_port)
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._do_start_app')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._check_app_installed')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._read_protocol_line')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'SnippetClient._connect_to_v1')
+ @mock.patch('mobly.controllers.android_device_lib.snippet_client.'
+ 'utils.get_available_host_port')
+ def test_snippet_start_app_and_connect_v1_persistent_session(
+ self, mock_get_port, mock_connect_to_v1, mock_read_protocol_line,
+ mock_check_app_installed, mock_do_start_app):
+
+ def _mocked_shell(arg):
+ if 'setsid' in arg:
+ raise adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code')
+ else:
+ return 'nohup'
+
+ mock_get_port.return_value = 123
+ mock_read_protocol_line.side_effect = [
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ 'SNIPPET START, PROTOCOL 1 234',
+ 'SNIPPET SERVING, PORT 1234',
+ ]
+
+ # Test 'setsid' exists
+ client = self._make_client()
+ client._adb.shell = mock.Mock(return_value='setsid')
+ client.start_app_and_connect()
+ cmd_setsid = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._SETSID_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls(mock.call(cmd_setsid))
+
+        # Test 'setsid' does not exist, but 'nohup' exists
+ client = self._make_client()
+ client._adb.shell = _mocked_shell
+ client.start_app_and_connect()
+ cmd_nohup = '%s am instrument -w -e action start %s/%s' % (
+ snippet_client._NOHUP_COMMAND,
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup)
+ ])
+
+ # Test both 'setsid' and 'nohup' do not exist
+ client._adb.shell = mock.Mock(
+ side_effect=adb.AdbError('cmd', 'stdout', 'stderr', 'ret_code'))
+ client = self._make_client()
+ client.start_app_and_connect()
+ cmd_not_persist = ' am instrument -w -e action start %s/%s' % (
+ MOCK_PACKAGE_NAME,
+ snippet_client._INSTRUMENTATION_RUNNER_PACKAGE)
+ mock_do_start_app.assert_has_calls([
+ mock.call(cmd_setsid),
+ mock.call(cmd_nohup),
+ mock.call(cmd_not_persist)
+ ])
+
@mock.patch('socket.create_connection')
@mock.patch('mobly.controllers.android_device_lib.snippet_client.'
'utils.start_standing_subprocess')
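
As a side note on the `base_test.py` hunk above: the duplicate `on_fail` problem disappears because the procedure functions are now dispatched from a single `finally` block keyed off the recorded result. An illustrative reduction of that control flow (plain Python, not mobly code) is:

```python
def exec_one_test(test, record):
    try:
        try:
            test.setup_test()
            test.test_body()
        finally:
            test.teardown_test()   # may raise even if the body already raised
    except Exception as e:
        record.result = 'FAIL'
        record.error = e
    else:
        record.result = 'PASS'
    finally:
        # Single dispatch point: however many of the blocks above raised,
        # on_fail / on_pass each run at most once per test.
        if record.result == 'FAIL':
            test.on_fail(record)
        else:
            test.on_pass(record)


class _Demo(object):
    def setup_test(self): pass
    def test_body(self): raise RuntimeError('body failed')
    def teardown_test(self): raise RuntimeError('teardown failed')
    def on_fail(self, record): print('on_fail called once: %s' % record.error)
    def on_pass(self, record): print('on_pass')


class _Record(object):
    result = None
    error = None


exec_one_test(_Demo(), _Record())   # prints the on_fail line exactly once
```

With the pre-fix ordering described in the problem statement, `on_fail` fired once for the test-body failure and again for the teardown failure.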
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
future==1.0.0
iniconfig==2.1.0
-e git+https://github.com/google/mobly.git@067621c8fbb771b35cf0a7c6de6c42a6c452321f#egg=mobly
mock==1.0.1
packaging==24.2
pluggy==1.5.0
portpicker==1.6.0
psutil==7.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli==2.2.1
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- future==1.0.0
- iniconfig==2.1.0
- mock==1.0.1
- packaging==24.2
- pluggy==1.5.0
- portpicker==1.6.0
- psutil==7.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_and_get_timeout",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_persistent_session"
]
| []
| [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_event_dict_to_snippet_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_timeout_value",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event",
"tests/mobly/controllers/android_device_lib/callback_handler_test.py::CallbackHandlerTest::test_wait_for_event_negative",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v0_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_v1_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"
]
| []
| Apache License 2.0 | 1,459 | [
"mobly/controllers/android_device_lib/snippet_client.py",
"mobly/controllers/android_device_lib/callback_handler.py"
]
| [
"mobly/controllers/android_device_lib/snippet_client.py",
"mobly/controllers/android_device_lib/callback_handler.py"
]
|
asottile__add-trailing-comma-8 | 8d87f678b13ac1497b688173e94d21d8371746dc | 2017-07-14 02:56:11 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 49e98c7..bd16709 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -15,9 +15,10 @@ from tokenize_rt import UNIMPORTANT_WS
Offset = collections.namedtuple('Offset', ('line', 'utf8_byte_offset'))
Call = collections.namedtuple('Call', ('node', 'star_args', 'arg_offsets'))
-Func = collections.namedtuple('Func', ('node', 'arg_offsets'))
+Func = collections.namedtuple('Func', ('node', 'star_args', 'arg_offsets'))
Literal = collections.namedtuple('Literal', ('node', 'braces', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
+Fix = collections.namedtuple('Fix', ('braces', 'initial_indent'))
NEWLINES = frozenset(('NEWLINE', 'NL'))
NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
@@ -141,28 +142,39 @@ class FindNodes(ast.NodeVisitor):
self.generic_visit(node)
def visit_FunctionDef(self, node):
- has_starargs = (
- node.args.vararg or node.args.kwarg or
- # python 3 only
- getattr(node.args, 'kwonlyargs', None)
- )
+ has_starargs = False
+ args = list(node.args.args)
+
+ if node.args.vararg:
+ if isinstance(node.args.vararg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.vararg)
+ has_starargs = True
+ if node.args.kwarg:
+ if isinstance(node.args.kwarg, ast.AST): # pragma: no cover (py3)
+ args.append(node.args.kwarg)
+ has_starargs = True
+ py3_kwonlyargs = getattr(node.args, 'kwonlyargs', None)
+ if py3_kwonlyargs: # pragma: no cover (py3)
+ args.extend(py3_kwonlyargs)
+ has_starargs = True
+
orig = node.lineno
is_multiline = False
offsets = set()
- for argnode in node.args.args:
+ for argnode in args:
offset = _to_offset(argnode)
if offset.line > orig:
is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if is_multiline:
key = Offset(node.lineno, node.col_offset)
- self.funcs[key] = Func(node, offsets)
+ self.funcs[key] = Func(node, has_starargs, offsets)
self.generic_visit(node)
-def _fix_inner(brace_start, brace_end, first_brace, tokens):
+def _find_simple(brace_start, brace_end, first_brace, tokens):
brace_stack = [first_brace]
for i in range(first_brace + 1, len(tokens)):
@@ -183,12 +195,6 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
if tokens[first_brace].line == tokens[last_brace].line:
return
- # Figure out if either of the braces are "hugging"
- hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
- hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
- if hug_open and tokens[last_brace - 1].src in END_BRACES:
- hug_open = hug_close = False
-
# determine the initial indentation
i = first_brace
while i >= 0 and tokens[i].name not in NEWLINES:
@@ -199,51 +205,10 @@ def _fix_inner(brace_start, brace_end, first_brace, tokens):
else:
initial_indent = 0
- # fix open hugging
- if hug_open:
- new_indent = initial_indent + 4
-
- tokens[first_brace + 1:first_brace + 1] = [
- Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
- ]
- last_brace += 2
-
- # Adust indentation for the rest of the things
- min_indent = None
- indents = []
- for i in range(first_brace + 3, last_brace):
- if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
- if min_indent is None:
- min_indent = len(tokens[i].src)
- elif len(tokens[i].src) < min_indent:
- min_indent = len(tokens[i].src)
-
- indents.append(i)
+ return Fix(braces=(first_brace, last_brace), initial_indent=initial_indent)
- for i in indents:
- oldlen = len(tokens[i].src)
- newlen = oldlen - min_indent + new_indent
- tokens[i] = tokens[i]._replace(src=' ' * newlen)
- # fix close hugging
- if hug_close:
- tokens[last_brace:last_brace] = [
- Token('NL', '\n'),
- Token(UNIMPORTANT_WS, ' ' * initial_indent),
- ]
- last_brace += 2
-
- # From there, we can walk backwards and decide whether a comma is needed
- i = last_brace - 1
- while tokens[i].name in NON_CODING_TOKENS:
- i -= 1
-
- # If we're not a hugging paren, we can insert a comma
- if tokens[i].src != ',' and i + 1 != last_brace:
- tokens.insert(i + 1, Token('OP', ','))
-
-
-def _fix_call(call, i, tokens):
+def _find_call(call, i, tokens):
# When we get a `call` object, the ast refers to it as this:
#
# func_name(arg, arg, arg)
@@ -273,10 +238,10 @@ def _fix_call(call, i, tokens):
else:
raise AssertionError('Past end?')
- _fix_inner(brace_start, brace_end, first_brace, tokens)
+ return _find_simple(brace_start, brace_end, first_brace, tokens)
-def _fix_literal(literal, i, tokens):
+def _find_literal(literal, i, tokens):
brace_start, brace_end = literal.braces
# tuples are evil, we need to backtrack to find the opening paren
@@ -289,7 +254,60 @@ def _fix_literal(literal, i, tokens):
if tokens[i].src != brace_start:
return
- _fix_inner(brace_start, brace_end, i, tokens)
+ return _find_simple(brace_start, brace_end, i, tokens)
+
+
+def _fix_comma_and_unhug(fix_data, add_comma, tokens):
+ first_brace, last_brace = fix_data.braces
+
+ # Figure out if either of the braces are "hugging"
+ hug_open = tokens[first_brace + 1].name not in NON_CODING_TOKENS
+ hug_close = tokens[last_brace - 1].name not in NON_CODING_TOKENS
+ if hug_open and tokens[last_brace - 1].src in END_BRACES:
+ hug_open = hug_close = False
+
+ # fix open hugging
+ if hug_open:
+ new_indent = fix_data.initial_indent + 4
+
+ tokens[first_brace + 1:first_brace + 1] = [
+ Token('NL', '\n'), Token(UNIMPORTANT_WS, ' ' * new_indent),
+ ]
+ last_brace += 2
+
+        # Adjust indentation for the rest of the things
+ min_indent = None
+ indents = []
+ for i in range(first_brace + 3, last_brace):
+ if tokens[i - 1].name == 'NL' and tokens[i].name == UNIMPORTANT_WS:
+ if min_indent is None:
+ min_indent = len(tokens[i].src)
+ elif len(tokens[i].src) < min_indent:
+ min_indent = len(tokens[i].src)
+
+ indents.append(i)
+
+ for i in indents:
+ oldlen = len(tokens[i].src)
+ newlen = oldlen - min_indent + new_indent
+ tokens[i] = tokens[i]._replace(src=' ' * newlen)
+
+ # fix close hugging
+ if hug_close:
+ tokens[last_brace:last_brace] = [
+ Token('NL', '\n'),
+ Token(UNIMPORTANT_WS, ' ' * fix_data.initial_indent),
+ ]
+ last_brace += 2
+
+ # From there, we can walk backwards and decide whether a comma is needed
+ i = last_brace - 1
+ while tokens[i].name in NON_CODING_TOKENS:
+ i -= 1
+
+ # If we're not a hugging paren, we can insert a comma
+ if add_comma and tokens[i].src != ',' and i + 1 != last_brace:
+ tokens.insert(i + 1, Token('OP', ','))
def _fix_src(contents_text, py35_plus):
@@ -305,16 +323,25 @@ def _fix_src(contents_text, py35_plus):
tokens = src_to_tokens(contents_text)
for i, token in reversed(tuple(enumerate(tokens))):
key = Offset(token.line, token.utf8_byte_offset)
+ add_comma = True
+ fix_data = None
+
if key in visitor.calls:
call = visitor.calls[key]
# Only fix stararg calls if asked to
- if not call.star_args or py35_plus:
- _fix_call(call, i, tokens)
- elif key in visitor.literals:
- _fix_literal(visitor.literals[key], i, tokens)
+ add_comma = not call.star_args or py35_plus
+ fix_data = _find_call(call, i, tokens)
elif key in visitor.funcs:
+ func = visitor.funcs[key]
+ # any amount of starargs excludes adding a comma for defs
+ add_comma = not func.star_args
# functions can be treated as calls
- _fix_call(visitor.funcs[key], i, tokens)
+ fix_data = _find_call(func, i, tokens)
+ elif key in visitor.literals:
+ fix_data = _find_literal(visitor.literals[key], i, tokens)
+
+ if fix_data is not None:
+ _fix_comma_and_unhug(fix_data, add_comma, tokens)
return tokens_to_src(tokens)
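
A quick Python 3 illustration (separate from the patch) of why `visit_FunctionDef` above can now collect offsets for `*args`, `**kwargs` and keyword-only arguments: on Python 3 they are `ast.arg` nodes carrying their own `lineno`/`col_offset`, whereas on Python 2 `vararg`/`kwarg` are plain strings, which is what the `isinstance(..., ast.AST)` guards and the `# pragma: no cover (py3)` markers account for.

```python
import ast

src = (
    'def f(\n'
    '        *args,\n'
    '        **kwargs): pass\n'
)
args = ast.parse(src).body[0].args
print(type(args.vararg), args.vararg.lineno)   # <class 'ast.arg'> 2
print(type(args.kwarg), args.kwarg.lineno)     # <class 'ast.arg'> 3
print(args.kwonlyargs)                         # [] here; populated for "*, kw=1"
```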
| microbug: `def f(*args):` and `f(*args)` are not unhugged
### minimal example
```python
def f(
*args): pass
f(
*args)
```
### expected
```python
def f(
*args
): pass
f(
*args
)
```
### actual
no change
### explain
these two forms are pruned during the trailing-comma determination, but unhugging should still be applied to them | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 810b741..c016b12 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -11,6 +11,7 @@ from add_trailing_comma import _fix_src
from add_trailing_comma import main
+xfailif_py2 = pytest.mark.xfail(sys.version_info < (3,), reason='py3+')
xfailif_lt_py35 = pytest.mark.xfail(sys.version_info < (3, 5), reason='py35+')
@@ -264,7 +265,7 @@ def test_noop_tuple_literal_without_braces():
# *args forbid trailing commas
'def f(\n'
' *args\n'
- '): pass'
+ '): pass',
# **kwargs forbid trailing commas
'def f(\n'
' **kwargs\n'
@@ -415,12 +416,56 @@ def test_noop_unhugs(src):
' 1,\n'
')',
),
+ (
+ 'f(\n'
+ ' *args)',
+
+ 'f(\n'
+ ' *args\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
assert _fix_src(src, py35_plus=False) == expected
+@xfailif_py2
[email protected](
+ ('src', 'expected'),
+ (
+ # python 2 doesn't give offset information for starargs
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
+        # python 2 doesn't have kwonlyargs
+ (
+ 'def f(\n'
+ ' *, kw=1, kw2=2): pass',
+
+ 'def f(\n'
+ ' *, kw=1, kw2=2\n'
+ '): pass',
+ ),
+ ),
+)
+def test_fix_unhugs_py3_only(src, expected):
+ assert _fix_src(src, py35_plus=False) == expected
+
+
def test_main_trivial():
assert main(()) == 0
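
For quick experimentation, a usage sketch that mirrors the new parametrized cases above (the `def` example assumes Python 3, matching the `xfailif_py2` marker; `_fix_src` is the same internal helper the tests import):

```python
from add_trailing_comma import _fix_src

print(_fix_src('f(\n    *args)', py35_plus=False))
# f(
#     *args
# )

print(_fix_src('def f(\n        *args): pass', py35_plus=False))
# def f(
#         *args
# ): pass
```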
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements-dev.txt && pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@8d87f678b13ac1497b688173e94d21d8371746dc#egg=add_trailing_comma
cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
tokenize_rt==6.1.0
tomli==2.2.1
virtualenv==20.29.3
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- tokenize-rt==6.1.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def"
]
| [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,460 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
pydicom__pydicom-410 | db94409999965965a0e73b53db5d89dfc3707e47 | 2017-07-14 09:41:20 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | diff --git a/Makefile b/Makefile
index f67bdaab1..c5c61355e 100644
--- a/Makefile
+++ b/Makefile
@@ -17,9 +17,6 @@ test: test-code test-doc
doc:
make -C doc html
-doc-noplot:
- make -C doc html-noplot
-
clean:
find . -name "*.so" -o -name "*.pyc" -o -name "*.md5" -o -name "*.pyd" -o -name "*~" | xargs rm -f
find . -name "*.pyx" -exec ./tools/rm_pyx_c_file.sh {} \;
diff --git a/build_tools/circle/build_doc.sh b/build_tools/circle/build_doc.sh
index b22f59e80..c353badee 100755
--- a/build_tools/circle/build_doc.sh
+++ b/build_tools/circle/build_doc.sh
@@ -78,7 +78,7 @@ then
MAKE_TARGET="dist LATEXMKOPTS=-halt-on-error"
elif [[ "$build_type" =~ ^QUICK ]]
then
- MAKE_TARGET=html-noplot
+ MAKE_TARGET=html
else
MAKE_TARGET=html
fi
diff --git a/pydicom/filereader.py b/pydicom/filereader.py
index 8735955f7..aa575f5a4 100644
--- a/pydicom/filereader.py
+++ b/pydicom/filereader.py
@@ -1,16 +1,17 @@
-# filereader.py
"""Read a dicom media file"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from __future__ import absolute_import
+
# Need zlib and io.BytesIO for deflate-compressed file
import os.path
import warnings
import zlib
from io import BytesIO
+from pydicom.misc import size_in_bytes
from pydicom.tag import TupleTag
from pydicom.dataelem import RawDataElement
from pydicom.util.hexutil import bytes2hex
@@ -20,15 +21,8 @@ from pydicom.compat import in_py2
from pydicom import compat
from pydicom import config # don't import datetime_conversion directly
from pydicom.config import logger
-
-stat_available = True
-try:
- from os import stat
-except ImportError:
- stat_available = False
-
from pydicom.errors import InvalidDicomError
-import pydicom.uid # for Implicit/Explicit/Little/Big Endian transfer syntax UIDs
+import pydicom.uid # for transfer syntax UIDs
from pydicom.filebase import DicomFile
from pydicom.dataset import Dataset, FileDataset
from pydicom.dicomdir import DicomDir
@@ -39,12 +33,19 @@ from pydicom.sequence import Sequence
from pydicom.fileutil import read_undefined_length_value
from struct import Struct, unpack
from sys import byteorder
+
+try:
+ from os import stat
+except ImportError:
+ stat = None
+
sys_is_little_endian = (byteorder == 'little')
class DicomIter(object):
"""Iterator over DICOM data elements created from a file-like object
"""
+
def __init__(self, fp, stop_when=None, force=False):
"""Read the preamble and meta info and prepare iterator for remainder of file.
@@ -109,13 +110,15 @@ class DicomIter(object):
yield self.file_meta_info[tag]
for data_element in data_element_generator(self.fp,
- self._is_implicit_VR, self._is_little_endian,
+ self._is_implicit_VR,
+ self._is_little_endian,
stop_when=self.stop_when):
yield data_element
def data_element_generator(fp, is_implicit_VR, is_little_endian,
- stop_when=None, defer_size=None, encoding=default_encoding):
+ stop_when=None, defer_size=None,
+ encoding=default_encoding):
"""Create a generator to efficiently return the raw data elements.
Parameters
@@ -182,6 +185,7 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
logger_debug = logger.debug
debugging = config.debugging
element_struct_unpack = element_struct.unpack
+ defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
@@ -233,9 +237,10 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
# Reading the value
# First case (most common): reading a value with a defined length
if length != 0xFFFFFFFF:
- # don't defer loading of Specific Character Set value as it is needed
- # immediately to get the character encoding for other tags
- if defer_size is not None and length > defer_size and tag != (0x08, 0x05):
+ # don't defer loading of Specific Character Set value as it is
+ # needed immediately to get the character encoding for other tags
+ if defer_size is not None and length > defer_size and tag != (
+ 0x08, 0x05):
# Flag as deferred by setting value to None, and skip bytes
value = None
logger_debug("Defer size exceeded. "
@@ -248,13 +253,18 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
if length > 12:
dotdot = "..."
logger_debug("%08x: %-34s %s %r %s" % (value_tell,
- bytes2hex(value[:12]), dotdot, value[:12], dotdot))
+ bytes2hex(
+ value[:12]),
+ dotdot,
+ value[:12], dotdot))
# If the tag is (0008,0005) Specific Character Set, then store it
if tag == (0x08, 0x05):
from pydicom.values import convert_string
- encoding = convert_string(value, is_little_endian, encoding=default_encoding)
- # Store the encoding value in the generator for use with future elements (SQs)
+ encoding = convert_string(value, is_little_endian,
+ encoding=default_encoding)
+ # Store the encoding value in the generator
+ # for use with future elements (SQs)
encoding = convert_encodings(encoding)
yield RawDataElement(tag, VR, length, value, value_tell,
@@ -273,7 +283,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
try:
VR = dictionary_VR(tag)
except KeyError:
- # Look ahead to see if it consists of items and is thus a SQ
+ # Look ahead to see if it consists of items
+ # and is thus a SQ
next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
# Rewind the file
fp.seek(fp_tell() - 4)
@@ -295,11 +306,14 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
value = read_undefined_length_value(fp, is_little_endian,
delimiter, defer_size)
- # If the tag is (0008,0005) Specific Character Set, then store it
+ # If the tag is (0008,0005) Specific Character Set,
+ # then store it
if tag == (0x08, 0x05):
from pydicom.values import convert_string
- encoding = convert_string(value, is_little_endian, encoding=default_encoding)
- # Store the encoding value in the generator for use with future elements (SQs)
+ encoding = convert_string(value, is_little_endian,
+ encoding=default_encoding)
+ # Store the encoding value in the generator for use
+ # with future elements (SQs)
encoding = convert_encodings(encoding)
yield RawDataElement(tag, VR, length, value, value_tell,
@@ -307,7 +321,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
- stop_when=None, defer_size=None, parent_encoding=default_encoding):
+ stop_when=None, defer_size=None,
+ parent_encoding=default_encoding):
"""Return a Dataset instance containing the next dataset in the file.
Parameters
@@ -389,7 +404,8 @@ def read_sequence(fp, is_implicit_VR, is_little_endian, bytelength, encoding,
return seq
-def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0):
+def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding,
+ offset=0):
"""Read and return a single sequence item, i.e. a Dataset"""
seq_item_tell = fp.tell() + offset
if is_little_endian:
@@ -399,15 +415,17 @@ def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0)
try:
bytes_read = fp.read(8)
group, element, length = unpack(tag_length_format, bytes_read)
- except:
+ except BaseException:
raise IOError("No tag to read at file position "
"{0:05x}".format(fp.tell() + offset))
tag = (group, element)
if tag == SequenceDelimiterTag: # No more items, time to stop reading
- logger.debug("{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence"))
+ logger.debug(
+ "{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence"))
if length != 0:
logger.warning("Expected 0x00000000 after delimiter, found 0x%x, "
- "at position 0x%x" % (length, fp.tell() - 4 + offset))
+ "at position 0x%x" % (
+ length, fp.tell() - 4 + offset))
return None
if tag != ItemTag:
logger.warning("Expected sequence item with tag %s at file position "
@@ -429,11 +447,12 @@ def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0)
def _read_command_set_elements(fp):
- """Return a Dataset containing any Command Set (0000,eeee) elements in `fp`.
+ """Return a Dataset containing any Command Set (0000,eeee) elements
+ in `fp`.
Command Set elements are always Implicit VR Little Endian (as per PS3.7
- Section 6.3). Once any Command Set elements are read `fp` will be positioned
- at the start of the next group of elements.
+ Section 6.3). Once any Command Set elements are read `fp` will be
+ positioned at the start of the next group of elements.
Parameters
----------
@@ -446,6 +465,7 @@ def _read_command_set_elements(fp):
The command set elements as a Dataset instance. May be empty if no
command set elements are present.
"""
+
def _not_group_0000(tag, VR, length):
"""Return True if the tag is not in group 0x0000, False otherwise."""
return (tag.group != 0)
@@ -474,9 +494,10 @@ def _read_file_meta_info(fp):
The File Meta elements as a Dataset instance. May be empty if no
File Meta are present.
"""
+
def _not_group_0002(tag, VR, length):
"""Return True if the tag is not in group 0x0002, False otherwise."""
- return (tag.group != 2)
+ return tag.group != 2
start_file_meta = fp.tell()
file_meta = read_dataset(fp, is_implicit_VR=False, is_little_endian=True,
@@ -491,9 +512,9 @@ def _read_file_meta_info(fp):
logger.info("_read_file_meta_info: (0002,0000) 'File Meta "
"Information Group Length' value doesn't match the "
"actual File Meta Information length ({0} vs {1} "
- "bytes).".format(
- file_meta.FileMetaInformationGroupLength,
- length_file_meta))
+ "bytes)."
+ .format(file_meta.FileMetaInformationGroupLength,
+ length_file_meta))
return file_meta
@@ -512,11 +533,11 @@ def read_file_meta_info(filename):
def read_preamble(fp, force):
"""Return the 128-byte DICOM preamble in `fp` if present.
- `fp` should be positioned at the start of the file-like. If the preamble and
- prefix are found then after reading `fp` will be positioned at the first
- byte after the prefix (byte offset 133). If either the preamble or prefix
- are missing and `force` is True then after reading `fp` will be positioned
- at the start of the file-like.
+ `fp` should be positioned at the start of the file-like. If the preamble
+ and prefix are found then after reading `fp` will be positioned at the
+ first byte after the prefix (byte offset 133). If either the preamble or
+ prefix are missing and `force` is True then after reading `fp` will be
+ positioned at the start of the file-like.
Parameters
----------
@@ -551,10 +572,11 @@ def read_preamble(fp, force):
logger.debug("Reading File Meta Information prefix...")
magic = fp.read(4)
if magic != b"DICM" and force:
- logger.info("File is not conformant with the DICOM File Format: 'DICM' "
- "prefix is missing from the File Meta Information header "
- "or the header itself is missing. Assuming no header and "
- "continuing.")
+ logger.info(
+ "File is not conformant with the DICOM File Format: 'DICM' "
+ "prefix is missing from the File Meta Information header "
+ "or the header itself is missing. Assuming no header and "
+ "continuing.")
preamble = None
fp.seek(0)
elif magic != b"DICM" and not force:
@@ -598,13 +620,15 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
read_file
More generic file reading function.
"""
- ## Read File Meta Information
+ # Read File Meta Information
+
# Read preamble (if present)
preamble = read_preamble(fileobj, force)
# Read any File Meta Information group (0002,eeee) elements (if present)
file_meta_dataset = _read_file_meta_info(fileobj)
- ## Read Dataset
+ # Read Dataset
+
# Read any Command Set group (0000,eeee) elements (if present)
command_set = _read_command_set_elements(fileobj)
@@ -612,14 +636,14 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
peek = fileobj.read(1)
fileobj.seek(-1, 1)
- # `filobj` should be positioned at the start of the dataset by this point,
+ # `filobj` should be positioned at the start of the dataset by this point.
# Ensure we have appropriate values for `is_implicit_VR` and
- # `is_little_endian` before we try decoding. We assume an initial
- # transfer syntax of implicit VR little endian and correct it as necessary
+ # `is_little_endian` before we try decoding. We assume an initial
+ # transfer syntax of implicit VR little endian and correct it as necessary
is_implicit_VR = True
is_little_endian = True
transfer_syntax = file_meta_dataset.get("TransferSyntaxUID")
- if peek == b'': # EOF
+ if peek == b'': # EOF
pass
elif transfer_syntax is None: # issue 258
# If no TransferSyntaxUID element then we have to try and figure out
@@ -693,11 +717,12 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
"""Read and parse a DICOM dataset stored in the DICOM File Format.
- Read a DICOM dataset stored in accordance with the DICOM File Format (DICOM
- Standard Part 10 Section 7). If the dataset is not stored in accordance
- with the File Format (i.e. the preamble and prefix are missing, there are
- missing required Type 1 File Meta Information Group elements or the entire
- File Meta Information is missing) then you will have to set `force` to True.
+ Read a DICOM dataset stored in accordance with the DICOM File Format
+ (DICOM Standard Part 10 Section 7). If the dataset is not stored in
+ accordance with the File Format (i.e. the preamble and prefix are missing,
+ there are missing required Type 1 File Meta Information Group elements
+ or the entire File Meta Information is missing) then you will have to
+ set `force` to True.
Parameters
----------
@@ -772,10 +797,8 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
logger.debug("Caller passed file name")
logger.debug("-" * 80)
- # Convert size to defer reading into bytes, and store in file object
- # if defer_size is not None:
- # defer_size = size_in_bytes(defer_size)
- # fp.defer_size = defer_size
+ # Convert size to defer reading into bytes
+ defer_size = size_in_bytes(defer_size)
# Iterate through all items and store them --include file meta if present
stop_when = None
@@ -816,7 +839,8 @@ def read_dicomdir(filename="DICOMDIR"):
ds = read_file(filename)
# Here, check that it is in fact DicomDir
if not isinstance(ds, DicomDir):
- msg = u"File '{0}' is not a Media Storage Directory file".format(filename)
+ msg = u"File '{0}' is not a Media Storage Directory file".format(
+ filename)
raise InvalidDicomError(msg)
return ds
@@ -824,7 +848,7 @@ def read_dicomdir(filename="DICOMDIR"):
def data_element_offset_to_value(is_implicit_VR, VR):
"""Return number of bytes from start of data element to start of value"""
if is_implicit_VR:
- offset = 8 # tag of 4 plus 4-byte length
+ offset = 8 # tag of 4 plus 4-byte length
else:
if VR in extra_length_VRs:
offset = 12 # tag 4 + 2 VR + 2 reserved + 4 length
@@ -846,7 +870,7 @@ def read_deferred_data_element(fileobj_type, filename, timestamp,
if not os.path.exists(filename):
raise IOError(u"Deferred read -- original file "
"{0:s} is missing".format(filename))
- if stat_available and (timestamp is not None):
+ if stat is not None and (timestamp is not None):
statinfo = os.stat(filename)
if statinfo.st_mtime != timestamp:
warnings.warn("Deferred read warning -- file modification time "
@@ -867,10 +891,12 @@ def read_deferred_data_element(fileobj_type, filename, timestamp,
fp.close()
if data_elem.VR != raw_data_elem.VR:
raise ValueError("Deferred read VR {0:s} does not match "
- "original {1:s}".format(data_elem.VR, raw_data_elem.VR))
+ "original {1:s}".format(data_elem.VR,
+ raw_data_elem.VR))
if data_elem.tag != raw_data_elem.tag:
raise ValueError("Deferred read tag {0!r} does not match "
- "original {1!r}".format(data_elem.tag, raw_data_elem.tag))
+ "original {1!r}".format(data_elem.tag,
+ raw_data_elem.tag))
# Everything is ok, now this object should act like usual DataElement
return data_elem
diff --git a/pydicom/fileutil.py b/pydicom/fileutil.py
index 146fa8ebb..e3eab5c5d 100644
--- a/pydicom/fileutil.py
+++ b/pydicom/fileutil.py
@@ -1,11 +1,12 @@
-# fileutil.py
-"""Functions for reading to certain bytes, e.g. delimiters"""
+"""Functions for reading to certain bytes, e.g. delimiters."""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from struct import pack, unpack
+
+from pydicom.misc import size_in_bytes
from pydicom.tag import TupleTag, Tag
from pydicom.datadict import dictionary_description
@@ -21,16 +22,20 @@ def absorb_delimiter_item(fp, is_little_endian, delimiter):
group, elem, length = unpack(struct_format, fp.read(8))
tag = TupleTag((group, elem))
if tag != delimiter:
- msg = "Did not find expected delimiter '%s'" % dictionary_description(delimiter)
- msg += ", instead found %s at file position 0x%x" % (str(tag), fp.tell() - 8)
+ msg = ("Did not find expected delimiter '%s'" %
+ dictionary_description(delimiter))
+ msg += ", instead found %s at file position 0x%x" % (
+ str(tag), fp.tell() - 8)
logger.warn(msg)
fp.seek(fp.tell() - 8)
return
- logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8, dictionary_description(delimiter))
+ logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8,
+ dictionary_description(delimiter))
if length == 0:
logger.debug("%04x: Read 0 bytes after delimiter", fp.tell() - 4)
else:
- logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x", fp.tell() - 4, length)
+ logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x",
+ fp.tell() - 4, length)
def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
@@ -38,6 +43,7 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
Parameters
----------
+ fp : file-like object
bytes_to_find : str
Contains the bytes to find. Must be in correct
endian order already.
@@ -56,25 +62,27 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
search_rewind = len(bytes_to_find) - 1
found = False
- EOF = False
+ eof = False
while not found:
chunk_start = fp.tell()
bytes_read = fp.read(read_size)
if len(bytes_read) < read_size:
- # try again - if still don't get required amount, this is last block
+ # try again - if still don't get required amount,
+ # this is the last block
new_bytes = fp.read(read_size - len(bytes_read))
bytes_read += new_bytes
if len(bytes_read) < read_size:
- EOF = True # but will still check whatever we did get
+ eof = True # but will still check whatever we did get
index = bytes_read.find(bytes_to_find)
if index != -1:
found = True
- elif EOF:
+ elif eof:
if rewind:
fp.seek(data_start)
return None
else:
- fp.seek(fp.tell() - search_rewind) # rewind a bit in case delimiter crossed read_size boundary
+ # rewind a bit in case delimiter crossed read_size boundary
+ fp.seek(fp.tell() - search_rewind)
# if get here then have found the byte string
found_at = chunk_start + index
if rewind:
@@ -84,18 +92,25 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
return found_at
-def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=None,
+def read_undefined_length_value(fp, is_little_endian, delimiter_tag,
+ defer_size=None,
read_size=128):
- """Read until the delimiter tag found and return the value; ignore the delimiter.
+ """Read until the delimiter tag found and return the value;
+ ignore the delimiter.
- On completion, the file will be set to the first byte after the delimiter and its
- following four zero bytes.
+ On completion, the file will be set to the first byte after the delimiter
+ and its following four zero bytes.
Parameters
----------
fp : a file-like object
is_little_endian : boolean
True if file transfer syntax is little endian, else False.
+ delimiter_tag : BaseTag
+        tag used as the end marker for reading
+ defer_size : int, None, optional
+ Size to avoid loading large elements in memory.
+ See ``filereader.read_file`` for more parameter info.
read_size : int
Number of bytes to read at one time.
@@ -119,18 +134,20 @@ def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=
bytes_to_find = pack(bytes_format, delimiter_tag.group, delimiter_tag.elem)
found = False
- EOF = False
+ eof = False
value_chunks = []
+ defer_size = size_in_bytes(defer_size)
byte_count = 0 # for defer_size checks
while not found:
chunk_start = fp.tell()
bytes_read = fp.read(read_size)
if len(bytes_read) < read_size:
- # try again - if still don't get required amount, this is last block
+ # try again - if still don't get required amount,
+ # this is the last block
new_bytes = fp.read(read_size - len(bytes_read))
bytes_read += new_bytes
if len(bytes_read) < read_size:
- EOF = True # but will still check whatever we did get
+ eof = True # but will still check whatever we did get
index = bytes_read.find(bytes_to_find)
if index != -1:
found = True
@@ -141,26 +158,31 @@ def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=
fp.seek(chunk_start + index + 4) # rewind to end of delimiter
length = fp.read(4)
if length != b"\0\0\0\0":
- msg = "Expected 4 zero bytes after undefined length delimiter at pos {0:04x}"
+ msg = ("Expected 4 zero bytes after undefined length delimiter"
+ " at pos {0:04x}")
logger.error(msg.format(fp.tell() - 4))
- elif EOF:
+ elif eof:
fp.seek(data_start)
- raise EOFError("End of file reached before delimiter {0!r} found".format(delimiter_tag))
+ raise EOFError(
+ "End of file reached before delimiter {0!r} found".format(
+ delimiter_tag))
else:
- fp.seek(fp.tell() - search_rewind) # rewind a bit in case delimiter crossed read_size boundary
+ # rewind a bit in case delimiter crossed read_size boundary
+ fp.seek(fp.tell() - search_rewind)
# accumulate the bytes read (not including the rewind)
new_bytes = bytes_read[:-search_rewind]
byte_count += len(new_bytes)
if defer_size is None or byte_count < defer_size:
value_chunks.append(new_bytes)
# if get here then have found the byte string
- if defer_size is not None and defer_size >= defer_size:
+ if defer_size is not None and byte_count >= defer_size:
return None
else:
return b"".join(value_chunks)
-def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
+def find_delimiter(fp, delimiter, is_little_endian, read_size=128,
+ rewind=True):
"""Return file position where 4-byte delimiter is located.
Parameters
@@ -181,13 +203,15 @@ def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
if not is_little_endian:
struct_format = ">H"
delimiter = Tag(delimiter)
- bytes_to_find = pack(struct_format, delimiter.group) + pack(struct_format, delimiter.elem)
+ bytes_to_find = pack(struct_format, delimiter.group) + pack(struct_format,
+ delimiter.elem)
return find_bytes(fp, bytes_to_find, read_size=read_size, rewind=rewind)
-def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, rewind=True):
- """Search through the file to find the delimiter and return the length of the data
- element.
+def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128,
+ rewind=True):
+ """Search through the file to find the delimiter and return the length
+ of the data element.
Parameters
----------
@@ -206,11 +230,12 @@ def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, r
Notes
-----
- Note the data element that the delimiter starts is not read here, the calling
- routine must handle that. Delimiter must be 4 bytes long.
+ Note the data element that the delimiter starts is not read here,
+ the calling routine must handle that. Delimiter must be 4 bytes long.
"""
data_start = fp.tell()
- delimiter_pos = find_delimiter(fp, delimiter, is_little_endian, rewind=rewind)
+ delimiter_pos = find_delimiter(fp, delimiter, is_little_endian,
+ rewind=rewind)
length = delimiter_pos - data_start
return length
@@ -222,7 +247,9 @@ def read_delimiter_item(fp, delimiter):
"""
found = fp.read(4)
if found != delimiter:
- logger.warn("Expected delimitor %s, got %s at file position 0x%x", Tag(delimiter), Tag(found), fp.tell() - 4)
+ logger.warn("Expected delimitor %s, got %s at file position 0x%x",
+ Tag(delimiter), Tag(found), fp.tell() - 4)
length = fp.read_UL()
if length != 0:
- logger.warn("Expected delimiter item to have length 0, got %d at file position 0x%x", length, fp.tell() - 4)
+ logger.warn("Expected delimiter item to have length 0, "
+ "got %d at file position 0x%x", length, fp.tell() - 4)
diff --git a/pydicom/misc.py b/pydicom/misc.py
index 2540a0b47..5e5e5c4f8 100644
--- a/pydicom/misc.py
+++ b/pydicom/misc.py
@@ -12,6 +12,8 @@ _size_factors = dict(KB=1024, MB=1024 * 1024, GB=1024 * 1024 * 1024)
def size_in_bytes(expr):
"""Return the number of bytes for a defer_size argument to read_file()
"""
+ if expr is None:
+ return None
try:
return int(expr)
except ValueError:
@@ -20,13 +22,15 @@ def size_in_bytes(expr):
val = float(expr[:-2]) * _size_factors[unit]
return val
else:
- raise ValueError("Unable to parse length with unit '{0:s}'".format(unit))
+ raise ValueError(
+ "Unable to parse length with unit '{0:s}'".format(unit))
def is_dicom(file):
"""Boolean specifying if file is a proper DICOM file.
- This function is a pared down version of read_preamble meant for a fast return.
+ This function is a pared down version of read_preamble meant for a
+ fast return.
The file is read for a proper preamble ('DICM'), returning True if so,
and False otherwise. This is a conservative approach.
@@ -45,9 +49,6 @@ def is_dicom(file):
raise IOError("File passed was not a valid file")
# TODO: error is only in Py3; what's a better Py2/3 error?
fp = open(file, 'rb')
- preamble = fp.read(0x80)
+ fp.read(0x80) # preamble
magic = fp.read(4)
- if magic == b"DICM":
- return True
- else:
- return False
+ return magic == b"DICM"
diff --git a/pydicom/util/leanread.py b/pydicom/util/leanread.py
index 59df2292f..296e9777c 100644
--- a/pydicom/util/leanread.py
+++ b/pydicom/util/leanread.py
@@ -1,10 +1,11 @@
-# leanread.py
"""Read a dicom media file"""
# Copyright (c) 2013 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
+from pydicom.misc import size_in_bytes
+from struct import Struct, unpack
extra_length_VRs_b = (b'OB', b'OW', b'OF', b'SQ', b'UN', b'UT')
ExplicitVRLittleEndian = b'1.2.840.10008.1.2.1'
@@ -17,9 +18,6 @@ ItemDelimiterTag = 0xFFFEE00D # end of Sequence Item
SequenceDelimiterTag = 0xFFFEE0DD # end of Sequence of undefined length
-from struct import Struct, unpack
-
-
class dicomfile(object):
"""Context-manager based DICOM file object with data element iteration"""
@@ -46,7 +44,8 @@ class dicomfile(object):
# Yield the file meta info elements
file_meta_gen = data_element_generator(self.fobj, is_implicit_VR=False,
is_little_endian=True,
- stop_when=lambda gp, elem: gp != 2)
+ stop_when=lambda gp,
+ elem: gp != 2)
for data_elem in file_meta_gen:
if data_elem[0] == (0x0002, 0x0010):
transfer_syntax_uid = data_elem[3]
@@ -57,12 +56,14 @@ class dicomfile(object):
if transfer_syntax_uid.endswith(b' ') or \
transfer_syntax_uid.endswith(b'\0'):
transfer_syntax_uid = transfer_syntax_uid[:-1]
- is_implicit_VR, is_little_endian = transfer_syntax(transfer_syntax_uid)
+ is_implicit_VR, is_little_endian = transfer_syntax(
+ transfer_syntax_uid)
# print is_implicit_VR
else:
raise NotImplementedError("No transfer syntax in file meta info")
- ds_gen = data_element_generator(self.fobj, is_implicit_VR, is_little_endian)
+ ds_gen = data_element_generator(self.fobj, is_implicit_VR,
+ is_little_endian)
for data_elem in ds_gen:
yield data_elem
@@ -114,6 +115,7 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
fp_read = fp.read
fp_tell = fp.tell
element_struct_unpack = element_struct.unpack
+ defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
@@ -166,7 +168,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
try:
VR = dictionary_VR(tag)
except KeyError:
- # Look ahead to see if it consists of items and is thus a SQ
+ # Look ahead to see if it consists of items and
+ # is thus a SQ
next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
# Rewind the file
fp.seek(fp_tell() - 4)
@@ -180,7 +183,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
# yield DataElement(tag, VR, seq, value_tell,
# is_undefined_length=True)
else:
- raise NotImplementedError("This reader does not handle undefined length except for SQ")
+ raise NotImplementedError("This reader does not handle "
+ "undefined length except for SQ")
from pydicom.fileio.fileutil import read_undefined_length_value
delimiter = SequenceDelimiterTag
| Circle CI crashes when it encounters the html-noplot rule
see [here](https://circleci.com/gh/pydicom/pydicom/58?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link) | pydicom/pydicom | diff --git a/tests/test_filereader.py b/tests/test_filereader.py
index 7a07748c5..57d634ddf 100644
--- a/tests/test_filereader.py
+++ b/tests/test_filereader.py
@@ -14,47 +14,24 @@ import shutil
import sys
import tempfile
import unittest
-from pydicom.util.testing.warncheck import assertWarns
-
-try:
- unittest.skipUnless
-except AttributeError:
- try:
- import unittest2 as unittest
- except ImportError:
- print("unittest2 is required for testing in python2.6")
# os.stat is only available on Unix and Windows XXX Mac?
# Not sure if on other platforms the import fails, or the call to it??
-stat_available = True
try:
from os import stat # NOQA
-except:
- stat_available = False
+except ImportError:
+ stat = None
-have_numpy = True
try:
import numpy # NOQA
-except:
- have_numpy = False
-
-from pydicom.dataset import Dataset, FileDataset
-from pydicom.dataelem import DataElement
-from pydicom.filebase import DicomBytesIO
-from pydicom.filereader import read_file, data_element_generator
-from pydicom.errors import InvalidDicomError
-from pydicom.dataset import PropertyError
-from pydicom.tag import Tag, TupleTag
-from pydicom.uid import ImplicitVRLittleEndian
-import pydicom.valuerep
+except ImportError:
+ numpy = None
-have_jpeg_ls = True
try:
import jpeg_ls
except ImportError:
- have_jpeg_ls = False
+ jpeg_ls = None
-have_pillow = True
try:
from PIL import Image as PILImg
except ImportError:
@@ -63,13 +40,26 @@ except ImportError:
import Image as PILImg
except ImportError:
# Neither worked, so it's likely not installed.
- have_pillow = False
+ PILImg = None
+
+from pydicom.dataset import Dataset, FileDataset
+from pydicom.dataelem import DataElement
+from pydicom.filereader import read_file
+from pydicom.errors import InvalidDicomError
+from pydicom.tag import Tag, TupleTag
+from pydicom.uid import ImplicitVRLittleEndian
+from pydicom.util.testing.warncheck import assertWarns
+import pydicom.valuerep
+have_numpy = numpy is not None
+have_jpeg_ls = jpeg_ls is not None
+have_pillow = PILImg is not None
test_dir = os.path.dirname(__file__)
test_files = os.path.join(test_dir, 'test_files')
-empty_number_tags_name = os.path.join(test_files, "reportsi_with_empty_number_tags.dcm")
+empty_number_tags_name = os.path.join(test_files,
+ "reportsi_with_empty_number_tags.dcm")
rtplan_name = os.path.join(test_files, "rtplan.dcm")
rtdose_name = os.path.join(test_files, "rtdose.dcm")
ct_name = os.path.join(test_files, "CT_small.dcm")
@@ -77,14 +67,16 @@ mr_name = os.path.join(test_files, "MR_small.dcm")
truncated_mr_name = os.path.join(test_files, "MR_truncated.dcm")
jpeg2000_name = os.path.join(test_files, "JPEG2000.dcm")
jpeg2000_lossless_name = os.path.join(test_files, "MR_small_jp2klossless.dcm")
-jpeg_ls_lossless_name = os.path.join(test_files, "MR_small_jpeg_ls_lossless.dcm")
+jpeg_ls_lossless_name = os.path.join(test_files,
+ "MR_small_jpeg_ls_lossless.dcm")
jpeg_lossy_name = os.path.join(test_files, "JPEG-lossy.dcm")
jpeg_lossless_name = os.path.join(test_files, "JPEG-LL.dcm")
deflate_name = os.path.join(test_files, "image_dfl.dcm")
rtstruct_name = os.path.join(test_files, "rtstruct.dcm")
priv_SQ_name = os.path.join(test_files, "priv_SQ.dcm")
nested_priv_SQ_name = os.path.join(test_files, "nested_priv_SQ.dcm")
-meta_missing_tsyntax_name = os.path.join(test_files, "meta_missing_tsyntax.dcm")
+meta_missing_tsyntax_name = os.path.join(test_files,
+ "meta_missing_tsyntax.dcm")
no_meta_group_length = os.path.join(test_files, "no_meta_group_length.dcm")
gzip_name = os.path.join(test_files, "zipMR.gz")
color_px_name = os.path.join(test_files, "color-px.dcm")
@@ -93,8 +85,10 @@ explicit_vr_le_no_meta = os.path.join(test_files, "ExplVR_LitEndNoMeta.dcm")
explicit_vr_be_no_meta = os.path.join(test_files, "ExplVR_BigEndNoMeta.dcm")
emri_name = os.path.join(test_files, "emri_small.dcm")
emri_big_endian_name = os.path.join(test_files, "emri_small_big_endian.dcm")
-emri_jpeg_ls_lossless = os.path.join(test_files, "emri_small_jpeg_ls_lossless.dcm")
-emri_jpeg_2k_lossless = os.path.join(test_files, "emri_small_jpeg_2k_lossless.dcm")
+emri_jpeg_ls_lossless = os.path.join(test_files,
+ "emri_small_jpeg_ls_lossless.dcm")
+emri_jpeg_2k_lossless = os.path.join(test_files,
+ "emri_small_jpeg_2k_lossless.dcm")
color_3d_jpeg_baseline = os.path.join(test_files, "color3d_jpeg_baseline.dcm")
dir_name = os.path.dirname(sys.argv[0])
save_dir = os.getcwd()
@@ -104,7 +98,7 @@ def isClose(a, b, epsilon=0.000001):
"""Compare within some tolerance, to avoid machine roundoff differences"""
try:
a.append # see if is a list
- except: # (is not)
+ except BaseException: # (is not)
return abs(a - b) < epsilon
else:
if len(a) != len(b):
@@ -117,7 +111,8 @@ def isClose(a, b, epsilon=0.000001):
class ReaderTests(unittest.TestCase):
def testEmptyNumbersTag(self):
- """Tests that an empty tag with a number VR (FL, UL, SL, US, SS, FL, FD, OF) reads as an empty string"""
+ """Tests that an empty tag with a number VR (FL, UL, SL, US,
+ SS, FL, FD, OF) reads as an empty string"""
empty_number_tags_ds = read_file(empty_number_tags_name)
self.assertEqual(empty_number_tags_ds.ExaminedBodyThickness, '')
self.assertEqual(empty_number_tags_ds.SimpleFrameList, '')
@@ -135,56 +130,72 @@ class ReaderTests(unittest.TestCase):
self.assertTrue(ds is not None)
def testRTPlan(self):
- """Returns correct values for sample data elements in test RT Plan file"""
+ """Returns correct values for sample data elements in test
+ RT Plan file.
+ """
plan = read_file(rtplan_name)
beam = plan.BeamSequence[0]
- cp0, cp1 = beam.ControlPointSequence # if not two controlpoints, then this would raise exception
+ # if not two controlpoints, then this would raise exception
+ cp0, cp1 = beam.ControlPointSequence
- self.assertEqual(beam.TreatmentMachineName, "unit001", "Incorrect unit name")
+ self.assertEqual(beam.TreatmentMachineName, "unit001",
+ "Incorrect unit name")
self.assertEqual(beam.TreatmentMachineName, beam[0x300a, 0x00b2].value,
- "beam TreatmentMachineName does not match the value accessed by tag number")
+ "beam TreatmentMachineName does not match "
+ "the value accessed by tag number")
- got = cp1.ReferencedDoseReferenceSequence[0].CumulativeDoseReferenceCoefficient
+ got = cp1.ReferencedDoseReferenceSequence[
+ 0].CumulativeDoseReferenceCoefficient
DS = pydicom.valuerep.DS
expected = DS('0.9990268')
self.assertTrue(got == expected,
- "Cum Dose Ref Coeff not the expected value (CP1, Ref'd Dose Ref")
+ "Cum Dose Ref Coeff not the expected value "
+ "(CP1, Ref'd Dose Ref")
got = cp0.BeamLimitingDevicePositionSequence[0].LeafJawPositions
self.assertTrue(got[0] == DS('-100') and got[1] == DS('100.0'),
"X jaws not as expected (control point 0)")
def testRTDose(self):
- """Returns correct values for sample data elements in test RT Dose file"""
+ """Returns correct values for sample data elements in test
+ RT Dose file"""
dose = read_file(rtdose_name)
self.assertEqual(dose.FrameIncrementPointer, Tag((0x3004, 0x000c)),
"Frame Increment Pointer not the expected value")
self.assertEqual(dose.FrameIncrementPointer, dose[0x28, 9].value,
- "FrameIncrementPointer does not match the value accessed by tag number")
+ "FrameIncrementPointer does not match the value "
+ "accessed by tag number")
- # try a value that is nested the deepest (so deep I break it into two steps!)
- fract = dose.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0]
+ # try a value that is nested the deepest
+ # (so deep I break it into two steps!)
+ fract = \
+ dose.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0]
beamnum = fract.ReferencedBeamSequence[0].ReferencedBeamNumber
self.assertEqual(beamnum, 1, "Beam number not the expected value")
def testCT(self):
- """Returns correct values for sample data elements in test CT file...."""
+ """Returns correct values for sample data elements in test CT file."""
ct = read_file(ct_name)
- self.assertEqual(ct.file_meta.ImplementationClassUID, '1.3.6.1.4.1.5962.2',
+ self.assertEqual(ct.file_meta.ImplementationClassUID,
+ '1.3.6.1.4.1.5962.2',
"ImplementationClassUID not the expected value")
self.assertEqual(ct.file_meta.ImplementationClassUID,
ct.file_meta[0x2, 0x12].value,
- "ImplementationClassUID does not match the value accessed by tag number")
- # (0020, 0032) Image Position (Patient) [-158.13580300000001, -179.035797, -75.699996999999996]
+ "ImplementationClassUID does not match the value "
+ "accessed by tag number")
+ # (0020, 0032) Image Position (Patient)
+ # [-158.13580300000001, -179.035797, -75.699996999999996]
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected."
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected."
"got {0}, expected {1}".format(got, expected))
self.assertEqual(ct.Rows, 128, "Rows not 128")
self.assertEqual(ct.Columns, 128, "Columns not 128")
self.assertEqual(ct.BitsStored, 16, "Bits Stored not 16")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
# Also test private elements name can be resolved:
expected = "[Duration of X-ray on]"
@@ -194,20 +205,24 @@ class ReaderTests(unittest.TestCase):
@unittest.skipUnless(have_numpy, "Numpy not installed")
def testCTPixelData(self):
- """Check that we can read pixel data. Tests that we get last one in array."""
+ """Check that we can read pixel data.
+ Tests that we get last one in array.
+ """
ct = read_file(ct_name)
expected = 909
got = ct.pixel_array[-1][-1]
- msg = "Did not get correct value for last pixel: expected %d, got %r" % (expected, got)
+ msg = ("Did not get correct value for last pixel: "
+ "expected %d, got %r" % (expected, got))
self.assertEqual(expected, got, msg)
def testNoForce(self):
- """Raises exception if missing DICOM header and force==False..........."""
+ """Raises exception if missing DICOM header and force==False."""
self.assertRaises(InvalidDicomError, read_file, rtstruct_name)
def testRTstruct(self):
- """Returns correct values for sample elements in test RTSTRUCT file...."""
- # RTSTRUCT test file has complex nested sequences -- see rtstruct.dump file
+ """Returns correct values for sample elements in test RTSTRUCT file."""
+ # RTSTRUCT test file has complex nested sequences
+ # -- see rtstruct.dump file
# Also has no DICOM header ... so tests 'force' argument of read_file
rtss = read_file(rtstruct_name, force=True)
@@ -230,7 +245,8 @@ class ReaderTests(unittest.TestCase):
self.assertEqual(expected, got, msg)
def testDir(self):
- """Returns correct dir attributes for both Dataset and DICOM names (python >= 2.6).."""
+ """Returns correct dir attributes for both Dataset and DICOM names
+ (python >= 2.6).."""
# Only python >= 2.6 calls __dir__ for dir() call
rtss = read_file(rtstruct_name, force=True)
# sample some expected 'dir' values
@@ -238,7 +254,8 @@ class ReaderTests(unittest.TestCase):
expect_in_dir = ['pixel_array', 'add_new', 'ROIContourSequence',
'StructureSetDate']
for name in expect_in_dir:
- self.assertTrue(name in got_dir, "Expected name '%s' in dir()" % name)
+ self.assertTrue(name in got_dir,
+ "Expected name '%s' in dir()" % name)
# Now check for some items in dir() of a nested item
roi0 = rtss.ROIContourSequence[0]
@@ -246,52 +263,65 @@ class ReaderTests(unittest.TestCase):
expect_in_dir = ['pixel_array', 'add_new', 'ReferencedROINumber',
'ROIDisplayColor']
for name in expect_in_dir:
- self.assertTrue(name in got_dir, "Expected name '%s' in dir()" % name)
+ self.assertTrue(name in got_dir,
+ "Expected name '%s' in dir()" % name)
def testMR(self):
- """Returns correct values for sample data elements in test MR file....."""
+ """Returns correct values for sample data elements in test MR file."""
mr = read_file(mr_name)
# (0010, 0010) Patient's Name 'CompressedSamples^MR1'
mr.decode()
- self.assertEqual(mr.PatientName, 'CompressedSamples^MR1', "Wrong patient name")
+ self.assertEqual(mr.PatientName, 'CompressedSamples^MR1',
+ "Wrong patient name")
self.assertEqual(mr.PatientName, mr[0x10, 0x10].value,
- "Name does not match value found when accessed by tag number")
+ "Name does not match value found when "
+ "accessed by tag number")
got = mr.PixelSpacing
DS = pydicom.valuerep.DS
expected = [DS('0.3125'), DS('0.3125')]
self.assertTrue(got == expected, "Wrong pixel spacing")
def testDeflate(self):
- """Returns correct values for sample data elements in test compressed (zlib deflate) file"""
- # Everything after group 2 is compressed. If we can read anything else, the decompression must have been ok.
+ """Returns correct values for sample data elements in test compressed
+ (zlib deflate) file
+ """
+ # Everything after group 2 is compressed.
+ # If we can read anything else, the decompression must have been ok.
ds = read_file(deflate_name)
got = ds.ConversionType
expected = "WSD"
- self.assertEqual(got, expected, "Attempted to read deflated file data element Conversion Type, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Attempted to read deflated file data element "
+ "Conversion Type, expected '%s', got '%s'" % (
+ expected, got))
def testNoPixelsRead(self):
- """Returns all data elements before pixels using stop_before_pixels=False"""
+ """Returns all data elements before pixels using
+ stop_before_pixels=False.
+ """
# Just check the tags, and a couple of values
ctpartial = read_file(ct_name, stop_before_pixels=True)
ctpartial_tags = sorted(ctpartial.keys())
ctfull = read_file(ct_name)
ctfull_tags = sorted(ctfull.keys())
- msg = "Tag list of partial CT read (except pixel tag and padding) did not match full read"
+ msg = ("Tag list of partial CT read (except pixel tag and padding) "
+ "did not match full read")
msg += "\nExpected: %r\nGot %r" % (ctfull_tags[:-2], ctpartial_tags)
missing = [Tag(0x7fe0, 0x10), Tag(0xfffc, 0xfffc)]
self.assertEqual(ctfull_tags, ctpartial_tags + missing, msg)
def testPrivateSQ(self):
- """Can read private undefined length SQ without error...................."""
- # From issues 91, 97, 98. Bug introduced by fast reading, due to VR=None
- # in raw data elements, then an undefined length private item VR is looked up,
- # and there is no such tag, generating an exception
+ """Can read private undefined length SQ without error."""
+ # From issues 91, 97, 98. Bug introduced by fast reading, due to
+ # VR=None in raw data elements, then an undefined length private
+ # item VR is looked up, and there is no such tag,
+ # generating an exception
# Simply read the file, in 0.9.5 this generated an exception
read_file(priv_SQ_name)
def testNestedPrivateSQ(self):
- """Can successfully read a private SQ which contains additional SQ's....."""
+ """Can successfully read a private SQ which contains additional SQs."""
# From issue 113. When a private SQ of undefined length is used, the
# sequence is read in and the length of the SQ is determined upon
# identification of the SQ termination sequence. When using nested
@@ -303,7 +333,8 @@ class ReaderTests(unittest.TestCase):
# Make sure that the entire dataset was read in
pixel_data_tag = TupleTag((0x7fe0, 0x10))
self.assertTrue(pixel_data_tag in ds,
- "Entire dataset was not parsed properly. PixelData is not present")
+ "Entire dataset was not parsed properly. "
+ "PixelData is not present")
# Check that the DataElement is indeed a Sequence
tag = TupleTag((0x01, 0x01))
@@ -328,44 +359,54 @@ class ReaderTests(unittest.TestCase):
"Expected a value of %s, got %s'" % (expected, got))
def testNoMetaGroupLength(self):
- """Read file with no group length in file meta..........................."""
+ """Read file with no group length in file meta."""
# Issue 108 -- iView example file with no group length (0002,0002)
# Originally crashed, now check no exception, but also check one item
# in file_meta, and second one in followinsg dataset
ds = read_file(no_meta_group_length)
got = ds.InstanceCreationDate
expected = "20111130"
- self.assertEqual(got, expected, "Sample data element after file meta with no group length failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element after file meta with no "
+ "group length failed, expected '%s', got '%s'" % (
+ expected, got))
def testNoTransferSyntaxInMeta(self):
- """Read file with file_meta, but has no TransferSyntaxUID in it............"""
+ """Read file with file_meta, but has no TransferSyntaxUID in it."""
# From issue 258: if file has file_meta but no TransferSyntaxUID in it,
# should assume default transfer syntax
- ds = read_file(meta_missing_tsyntax_name) # is dicom default transfer syntax
+ ds = read_file(
+ meta_missing_tsyntax_name) # is dicom default transfer syntax
# Repeat one test from nested private sequence test to maker sure
# file was read correctly
pixel_data_tag = TupleTag((0x7fe0, 0x10))
self.assertTrue(pixel_data_tag in ds,
- "Failed to properly read a file with no Transfer Syntax in file_meta")
+ "Failed to properly read a file with no "
+ "Transfer Syntax in file_meta")
def testExplicitVRLittleEndianNoMeta(self):
- """Read file without file meta with Little Endian Explicit VR dataset...."""
+ """Read file without file meta with Little Endian Explicit VR dataset.
+ """
# Example file from CMS XiO 5.0 and above
# Still need to force read data since there is no 'DICM' marker present
ds = read_file(explicit_vr_le_no_meta, force=True)
got = ds.InstanceCreationDate
expected = "20150529"
- self.assertEqual(got, expected, "Sample data element from dataset failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element from dataset failed, "
+ "expected '%s', got '%s'" % (expected, got))
def testExplicitVRBigEndianNoMeta(self):
- """Read file without file meta with Big Endian Explicit VR dataset......."""
+ """Read file without file meta with Big Endian Explicit VR dataset."""
# Example file from CMS XiO 5.0 and above
# Still need to force read data since there is no 'DICM' marker present
ds = read_file(explicit_vr_be_no_meta, force=True)
got = ds.InstanceCreationDate
expected = "20150529"
- self.assertEqual(got, expected, "Sample data element from dataset failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element from dataset failed, "
+ "expected '%s', got '%s'" % (expected, got))
def testPlanarConfig(self):
px_data_ds = read_file(color_px_name)
@@ -407,7 +448,8 @@ class ReaderTests(unittest.TestCase):
self.assertEqual(ds[0x7fe00010].VR, 'OB')
def test_long_specific_char_set(self):
- """Test that specific character set is read even if it is longer than defer_size"""
+ """Test that specific character set is read even if it is longer
+ than defer_size"""
ds = Dataset()
long_specific_char_set_value = ['ISO 2022IR 100'] * 9
@@ -422,52 +464,62 @@ class ReaderTests(unittest.TestCase):
def test_no_preamble_file_meta_dataset(self):
"""Test correct read of group 2 elements with no preamble."""
- bytestream = b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ bytestream = (b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00\x20\x20\x10\x00'
+ b'\x02\x00\x00\x00\x01\x00\x20\x20\x20\x00\x06\x00'
+ b'\x00\x00\x4e\x4f\x52\x4d\x41\x4c')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
def test_no_preamble_command_group_dataset(self):
"""Test correct read of group 0 and 2 elements with no preamble."""
- bytestream = b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00' \
- b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c' \
- b'\x00\x00\x10\x01\x02\x00\x00\x00\x03\x00'
+ bytestream = (b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00'
+ b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20'
+ b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ b'\x00\x00\x10\x01\x02\x00\x00\x00\x03\x00')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
self.assertEqual(ds.MessageID, 3)
def test_group_length_wrong(self):
- """Test file is read correctly even if FileMetaInformationGroupLength is incorrect."""
- bytestream = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00' \
- b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ """Test file is read correctly even if FileMetaInformationGroupLength
+ is incorrect.
+ """
+ bytestream = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00'
+ b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20'
+ b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
- self.assertFalse(len(bytestream) - 12 == ds.file_meta.FileMetaInformationGroupLength)
+ self.assertFalse(len(
+ bytestream) - 12 == ds.file_meta.FileMetaInformationGroupLength)
self.assertTrue(ds.file_meta.FileMetaInformationGroupLength == 10)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
@@ -475,18 +527,19 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble, command and meta elements"""
preamble = b'\x00' * 128
prefix = b'DICM'
- command = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
- meta = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ command = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
+ meta = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
bytestream = preamble + prefix + meta + command
fp = BytesIO(bytestream)
@@ -498,11 +551,12 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble and meta elements"""
preamble = b'\x00' * 128
prefix = b'DICM'
- meta = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ meta = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
bytestream = preamble + prefix + meta
fp = BytesIO(bytestream)
@@ -515,13 +569,13 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble and command set"""
preamble = b'\x00' * 128
prefix = b'DICM'
- command = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
+ command = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
bytestream = preamble + prefix + command
fp = BytesIO(bytestream)
@@ -531,11 +585,12 @@ class ReaderTests(unittest.TestCase):
def test_meta_no_dataset(self):
"""Test reading only meta elements"""
- bytestream = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ bytestream = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('TransferSyntaxUID' in ds.file_meta)
@@ -543,13 +598,13 @@ class ReaderTests(unittest.TestCase):
def test_commandset_no_dataset(self):
"""Test reading only command set elements"""
- bytestream = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
+ bytestream = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MessageID' in ds)
@@ -569,23 +624,24 @@ class ReaderTests(unittest.TestCase):
class ReadDataElementTests(unittest.TestCase):
def setUp(self):
ds = Dataset()
- ds.DoubleFloatPixelData = b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07' # VR of OD
- ds.SelectorOLValue = b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03' # VR of OL
- ds.PotentialReasonsForProcedure = ['A', 'B', 'C'] # VR of UC, odd length
- ds.StrainDescription = 'Test' # Even length
- ds.URNCodeValue = 'http://test.com' # VR of UR
- ds.RetrieveURL = 'ftp://test.com ' # Test trailing spaces ignored
- ds.DestinationAE = ' TEST 12 ' # 16 characters max for AE
-
- self.fp = BytesIO() # Implicit little
+ ds.DoubleFloatPixelData = (b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07') # OD
+ ds.SelectorOLValue = (b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03') # VR of OL
+ ds.PotentialReasonsForProcedure = ['A', 'B',
+ 'C'] # VR of UC, odd length
+ ds.StrainDescription = 'Test' # Even length
+ ds.URNCodeValue = 'http://test.com' # VR of UR
+ ds.RetrieveURL = 'ftp://test.com ' # Test trailing spaces ignored
+ ds.DestinationAE = ' TEST 12 ' # 16 characters max for AE
+
+ self.fp = BytesIO() # Implicit little
file_ds = FileDataset(self.fp, ds)
file_ds.is_implicit_VR = True
file_ds.is_little_endian = True
file_ds.save_as(self.fp)
- self.fp_ex = BytesIO() # Explicit little
+ self.fp_ex = BytesIO() # Explicit little
file_ds = FileDataset(self.fp_ex, ds)
file_ds.is_implicit_VR = False
file_ds.is_little_endian = True
@@ -595,32 +651,36 @@ class ReadDataElementTests(unittest.TestCase):
"""Check creation of OD DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
ref_elem = ds.get(0x7fe00009)
- elem = DataElement(0x7fe00009, 'OD', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07')
+ elem = DataElement(0x7fe00009, 'OD',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07')
self.assertEqual(ref_elem, elem)
def test_read_OD_explicit_little(self):
"""Check creation of OD DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
ref_elem = ds.get(0x7fe00009)
- elem = DataElement(0x7fe00009, 'OD', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07')
+ elem = DataElement(0x7fe00009, 'OD',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07')
self.assertEqual(ref_elem, elem)
def test_read_OL_implicit_little(self):
"""Check creation of OL DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
ref_elem = ds.get(0x00720075)
- elem = DataElement(0x00720075, 'OL', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03')
+ elem = DataElement(0x00720075, 'OL',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03')
self.assertEqual(ref_elem, elem)
def test_read_OL_explicit_little(self):
"""Check creation of OL DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
ref_elem = ds.get(0x00720075)
- elem = DataElement(0x00720075, 'OL', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03')
+ elem = DataElement(0x00720075, 'OL',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03')
self.assertEqual(ref_elem, elem)
def test_read_UC_implicit_little(self):
@@ -650,24 +710,24 @@ class ReadDataElementTests(unittest.TestCase):
def test_read_UR_implicit_little(self):
"""Check creation of DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
- ref_elem = ds.get(0x00080120) # URNCodeValue
+ ref_elem = ds.get(0x00080120) # URNCodeValue
elem = DataElement(0x00080120, 'UR', 'http://test.com')
self.assertEqual(ref_elem, elem)
# Test trailing spaces ignored
- ref_elem = ds.get(0x00081190) # RetrieveURL
+ ref_elem = ds.get(0x00081190) # RetrieveURL
elem = DataElement(0x00081190, 'UR', 'ftp://test.com')
self.assertEqual(ref_elem, elem)
def test_read_UR_explicit_little(self):
"""Check creation of DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
- ref_elem = ds.get(0x00080120) # URNCodeValue
+ ref_elem = ds.get(0x00080120) # URNCodeValue
elem = DataElement(0x00080120, 'UR', 'http://test.com')
self.assertEqual(ref_elem, elem)
# Test trailing spaces ignored
- ref_elem = ds.get(0x00081190) # RetrieveURL
+ ref_elem = ds.get(0x00081190) # RetrieveURL
elem = DataElement(0x00081190, 'UR', 'ftp://test.com')
self.assertEqual(ref_elem, elem)
@@ -690,9 +750,11 @@ class JPEG_LS_Tests(unittest.TestCase):
a = self.jpeg_ls_lossless.pixel_array
b = self.mr_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.jpeg_ls_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.jpeg_ls_lossless._get_pixel_array)
def test_emri_JPEG_LS_PixelArray(self):
"""JPEG LS Lossless: Now works"""
@@ -700,9 +762,11 @@ class JPEG_LS_Tests(unittest.TestCase):
a = self.emri_jpeg_ls_lossless.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.emri_jpeg_ls_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.emri_jpeg_ls_lossless._get_pixel_array)
class BigEndian_Tests(unittest.TestCase):
@@ -716,9 +780,11 @@ class BigEndian_Tests(unittest.TestCase):
a = self.emri_big_endian.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded big endian pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded big endian pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(ImportError, self.emri_big_endian._get_pixel_array)
+ self.assertRaises(ImportError,
+ self.emri_big_endian._get_pixel_array)
class JPEG2000Tests(unittest.TestCase):
@@ -730,14 +796,19 @@ class JPEG2000Tests(unittest.TestCase):
self.emri_small = read_file(emri_name)
def testJPEG2000(self):
- """JPEG2000: Returns correct values for sample data elements............"""
- expected = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)] # XX also tests multiple-valued AT data element
+ """JPEG2000: Returns correct values for sample data elements."""
+ # XX also tests multiple-valued AT data element
+ expected = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)]
got = self.jpeg.FrameIncrementPointer
- self.assertEqual(got, expected, "JPEG2000 file, Frame Increment Pointer: expected %s, got %s" % (expected, got))
+ self.assertEqual(got, expected,
+ "JPEG2000 file, Frame Increment Pointer: "
+ "expected %s, got %s" % (expected, got))
got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
expected = 'Lossy Compression'
- self.assertEqual(got, expected, "JPEG200 file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG200 file, Code Meaning got %s, expected %s" % (
+ got, expected))
def testJPEG2000PixelArray(self):
"""JPEG2000: Now works"""
@@ -745,9 +816,11 @@ class JPEG2000Tests(unittest.TestCase):
a = self.jpegls.pixel_array
b = self.mr_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.jpegls._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.jpegls._get_pixel_array)
def test_emri_JPEG2000PixelArray(self):
"""JPEG2000: Now works"""
@@ -755,25 +828,28 @@ class JPEG2000Tests(unittest.TestCase):
a = self.emri_jpeg_2k_lossless.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.emri_jpeg_2k_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.emri_jpeg_2k_lossless._get_pixel_array)
class JPEGlossyTests(unittest.TestCase):
-
def setUp(self):
self.jpeg = read_file(jpeg_lossy_name)
self.color_3d_jpeg = read_file(color_3d_jpeg_baseline)
def testJPEGlossy(self):
- """JPEG-lossy: Returns correct values for sample data elements.........."""
+ """JPEG-lossy: Returns correct values for sample data elements."""
got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
expected = 'Lossy Compression'
- self.assertEqual(got, expected, "JPEG-lossy file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG-lossy file, Code Meaning got %s, "
+ "expected %s" % (got, expected))
def testJPEGlossyPixelArray(self):
- """JPEG-lossy: Fails gracefully when uncompressed data is asked for....."""
+ """JPEG-lossy: Fails gracefully when uncompressed data is asked for."""
if have_pillow and have_numpy:
self.assertRaises(NotImplementedError, self.jpeg._get_pixel_array)
else:
@@ -787,7 +863,8 @@ class JPEGlossyTests(unittest.TestCase):
self.assertEqual(tuple(a[3, 159, 290, :]), (41, 41, 41))
self.assertEqual(tuple(a[3, 169, 290, :]), (57, 57, 57))
else:
- self.assertRaises(NotImplementedError, self.color_3d_jpeg._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.color_3d_jpeg._get_pixel_array)
class JPEGlosslessTests(unittest.TestCase):
@@ -795,13 +872,17 @@ class JPEGlosslessTests(unittest.TestCase):
self.jpeg = read_file(jpeg_lossless_name)
def testJPEGlossless(self):
- """JPEGlossless: Returns correct values for sample data elements........"""
- got = self.jpeg.SourceImageSequence[0].PurposeOfReferenceCodeSequence[0].CodeMeaning
+ """JPEGlossless: Returns correct values for sample data elements."""
+ got = self.jpeg.SourceImageSequence[0].PurposeOfReferenceCodeSequence[
+ 0].CodeMeaning
expected = 'Uncompressed predecessor'
- self.assertEqual(got, expected, "JPEG-lossless file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG-lossless file, Code Meaning got %s, "
+ "expected %s" % (got, expected))
def testJPEGlosslessPixelArray(self):
- """JPEGlossless: Fails gracefully when uncompressed data is asked for..."""
+ """JPEGlossless: Fails gracefully when uncompressed data is asked for.
+ """
# This test passes if the call raises either an
# ImportError when there is no Pillow module
# Or
@@ -823,6 +904,7 @@ class DeferredReadTests(unittest.TestCase):
"""Test that deferred data element reading (for large size)
works as expected
"""
+
# Copy one of test files and use temporarily, then later remove.
def setUp(self):
self.testfile_name = ct_name + ".tmp"
@@ -830,8 +912,8 @@ class DeferredReadTests(unittest.TestCase):
def testTimeCheck(self):
"""Deferred read warns if file has been modified..........."""
- if stat_available:
- ds = read_file(self.testfile_name, defer_size=2000)
+ if stat is not None:
+ ds = read_file(self.testfile_name, defer_size='2 kB')
from time import sleep
sleep(1)
with open(self.testfile_name, "r+") as f:
@@ -859,7 +941,8 @@ class DeferredReadTests(unittest.TestCase):
ds_defer = read_file(self.testfile_name, defer_size=2000)
for data_elem in ds_norm:
tag = data_elem.tag
- self.assertEqual(data_elem.value, ds_defer[tag].value, "Mismatched value for tag %r" % tag)
+ self.assertEqual(data_elem.value, ds_defer[tag].value,
+ "Mismatched value for tag %r" % tag)
def testZippedDeferred(self):
"""Deferred values from a gzipped file works.............."""
@@ -867,8 +950,8 @@ class DeferredReadTests(unittest.TestCase):
fobj = gzip.open(gzip_name)
ds = read_file(fobj, defer_size=1)
fobj.close()
- # before the fix, this threw an error as file reading was not in right place,
- # it was re-opened as a normal file, not zip file
+ # before the fix, this threw an error as file reading was not in
+ # the right place, it was re-opened as a normal file, not a zip file
ds.InstanceNumber
def tearDown(self):
@@ -880,9 +963,11 @@ class ReadTruncatedFileTests(unittest.TestCase):
def testReadFileWithMissingPixelData(self):
mr = read_file(truncated_mr_name)
mr.decode()
- self.assertEqual(mr.PatientName, 'CompressedSamples^MR1', "Wrong patient name")
+ self.assertEqual(mr.PatientName, 'CompressedSamples^MR1',
+ "Wrong patient name")
self.assertEqual(mr.PatientName, mr[0x10, 0x10].value,
- "Name does not match value found when accessed by tag number")
+ "Name does not match value found when "
+ "accessed by tag number")
got = mr.PixelSpacing
DS = pydicom.valuerep.DS
expected = [DS('0.3125'), DS('0.3125')]
@@ -892,35 +977,48 @@ class ReadTruncatedFileTests(unittest.TestCase):
def testReadFileWithMissingPixelDataArray(self):
mr = read_file(truncated_mr_name)
mr.decode()
- with self.assertRaisesRegexp(AttributeError, "Amount of pixel data.*does not match the expected data"):
+ with self.assertRaisesRegexp(AttributeError,
+ "Amount of pixel data.*does not match "
+ "the expected data"):
mr.pixel_array
class FileLikeTests(unittest.TestCase):
- """Test that can read DICOM files with file-like object rather than filename"""
+ """Test that can read DICOM files with file-like object rather than
+ filename
+ """
+
def testReadFileGivenFileObject(self):
"""filereader: can read using already opened file............"""
f = open(ct_name, 'rb')
ct = read_file(f)
- # Tests here simply repeat testCT -- perhaps should collapse the code together?
+ # Tests here simply repeat testCT -- perhaps should collapse
+ # the code together?
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
- self.assertEqual(ct.file_meta.ImplementationClassUID, '1.3.6.1.4.1.5962.2',
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
+ self.assertEqual(ct.file_meta.ImplementationClassUID,
+ '1.3.6.1.4.1.5962.2',
"ImplementationClassUID not the expected value")
self.assertEqual(ct.file_meta.ImplementationClassUID,
ct.file_meta[0x2, 0x12].value,
- "ImplementationClassUID does not match the value accessed by tag number")
- # (0020, 0032) Image Position (Patient) [-158.13580300000001, -179.035797, -75.699996999999996]
+ "ImplementationClassUID does not match the "
+ "value accessed by tag number")
+ # (0020, 0032) Image Position (Patient)
+ # [-158.13580300000001, -179.035797, -75.699996999999996]
got = ct.ImagePositionPatient
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
self.assertEqual(ct.Rows, 128, "Rows not 128")
self.assertEqual(ct.Columns, 128, "Columns not 128")
self.assertEqual(ct.BitsStored, 16, "Bits Stored not 16")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
- # Should also be able to close the file ourselves without exception raised:
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
+ # Should also be able to close the file ourselves without
+ # exception raised:
f.close()
def testReadFileGivenFileLikeObject(self):
@@ -932,13 +1030,17 @@ class FileLikeTests(unittest.TestCase):
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
- # Should also be able to close the file ourselves without exception raised:
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
+ # Should also be able to close the file ourselves without
+ # exception raised:
file_like.close()
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
- # If not run from the directory where the sample images are, then need to switch there
+ # If not run from the directory where the sample images are, then need
+ # to switch there
unittest.main()
diff --git a/tests/test_misc.py b/tests/test_misc.py
index 2472e99d6..040938495 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -3,12 +3,13 @@
import unittest
import os.path as osp
-from pydicom.misc import is_dicom
+from pydicom.misc import is_dicom, size_in_bytes
-test_file = osp.join(osp.dirname(osp.abspath(__file__)), 'test_files', 'CT_small.dcm')
+test_file = osp.join(osp.dirname(osp.abspath(__file__)), 'test_files',
+ 'CT_small.dcm')
-class Test_Misc(unittest.TestCase):
+class TestMisc(unittest.TestCase):
def test_is_dicom(self):
"""Test the is_dicom function."""
invalid_file = test_file.replace('CT_', 'CT') # invalid file
@@ -22,3 +23,19 @@ class Test_Misc(unittest.TestCase):
# test invalid path
self.assertRaises(IOError, is_dicom, invalid_file)
+
+ def test_size_in_bytes(self):
+ """Test convenience function size_in_bytes()."""
+ # None or numbers shall be returned unchanged
+ self.assertIsNone(size_in_bytes(None))
+ self.assertEqual(1234, size_in_bytes(1234))
+
+ # string shall be parsed
+ self.assertEqual(1234, size_in_bytes('1234'))
+ self.assertEqual(4096, size_in_bytes('4 kb'))
+ self.assertEqual(0x4000, size_in_bytes('16 KB'))
+ self.assertEqual(0x300000, size_in_bytes('3 MB'))
+ self.assertEqual(0x80000000, size_in_bytes('2gB'))
+
+ self.assertRaises(ValueError, size_in_bytes, '2 TB')
+ self.assertRaises(ValueError, size_in_bytes, 'KB 2')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 6
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@db94409999965965a0e73b53db5d89dfc3707e47#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- pytest-cov==4.0.0
- tomli==1.2.3
prefix: /opt/conda/envs/pydicom
| [
"tests/test_misc.py::TestMisc::test_size_in_bytes"
]
| [
"tests/test_filereader.py::DeferredReadTests::testFileExists",
"tests/test_filereader.py::DeferredReadTests::testTimeCheck",
"tests/test_filereader.py::DeferredReadTests::testValuesIdentical",
"tests/test_filereader.py::DeferredReadTests::testZippedDeferred"
]
| [
"tests/test_filereader.py::ReaderTests::testCT",
"tests/test_filereader.py::ReaderTests::testDeflate",
"tests/test_filereader.py::ReaderTests::testDir",
"tests/test_filereader.py::ReaderTests::testEmptyNumbersTag",
"tests/test_filereader.py::ReaderTests::testExplicitVRBigEndianNoMeta",
"tests/test_filereader.py::ReaderTests::testExplicitVRLittleEndianNoMeta",
"tests/test_filereader.py::ReaderTests::testMR",
"tests/test_filereader.py::ReaderTests::testNestedPrivateSQ",
"tests/test_filereader.py::ReaderTests::testNoForce",
"tests/test_filereader.py::ReaderTests::testNoMetaGroupLength",
"tests/test_filereader.py::ReaderTests::testNoPixelsRead",
"tests/test_filereader.py::ReaderTests::testNoTransferSyntaxInMeta",
"tests/test_filereader.py::ReaderTests::testPlanarConfig",
"tests/test_filereader.py::ReaderTests::testPrivateSQ",
"tests/test_filereader.py::ReaderTests::testRTDose",
"tests/test_filereader.py::ReaderTests::testRTPlan",
"tests/test_filereader.py::ReaderTests::testRTstruct",
"tests/test_filereader.py::ReaderTests::testUTF8FileName",
"tests/test_filereader.py::ReaderTests::test_commandset_no_dataset",
"tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr",
"tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr_compressed",
"tests/test_filereader.py::ReaderTests::test_group_length_wrong",
"tests/test_filereader.py::ReaderTests::test_long_specific_char_set",
"tests/test_filereader.py::ReaderTests::test_meta_no_dataset",
"tests/test_filereader.py::ReaderTests::test_no_dataset",
"tests/test_filereader.py::ReaderTests::test_no_preamble_command_group_dataset",
"tests/test_filereader.py::ReaderTests::test_no_preamble_file_meta_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_command_meta_no_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_commandset_no_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_meta_no_dataset",
"tests/test_filereader.py::ReadDataElementTests::test_read_AE",
"tests/test_filereader.py::ReadDataElementTests::test_read_OD_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OD_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OL_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OL_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UC_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UC_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UR_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UR_implicit_little",
"tests/test_filereader.py::JPEG_LS_Tests::testJPEG_LS_PixelArray",
"tests/test_filereader.py::JPEG_LS_Tests::test_emri_JPEG_LS_PixelArray",
"tests/test_filereader.py::BigEndian_Tests::test_big_endian_PixelArray",
"tests/test_filereader.py::JPEG2000Tests::testJPEG2000",
"tests/test_filereader.py::JPEG2000Tests::testJPEG2000PixelArray",
"tests/test_filereader.py::JPEG2000Tests::test_emri_JPEG2000PixelArray",
"tests/test_filereader.py::JPEGlossyTests::testJPEGBaselineColor3DPixelArray",
"tests/test_filereader.py::JPEGlossyTests::testJPEGlossy",
"tests/test_filereader.py::JPEGlossyTests::testJPEGlossyPixelArray",
"tests/test_filereader.py::JPEGlosslessTests::testJPEGlossless",
"tests/test_filereader.py::JPEGlosslessTests::testJPEGlosslessPixelArray",
"tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelData",
"tests/test_filereader.py::FileLikeTests::testReadFileGivenFileLikeObject",
"tests/test_filereader.py::FileLikeTests::testReadFileGivenFileObject",
"tests/test_misc.py::TestMisc::test_is_dicom"
]
| []
| MIT License | 1,461 | [
"Makefile",
"pydicom/fileutil.py",
"pydicom/util/leanread.py",
"pydicom/misc.py",
"build_tools/circle/build_doc.sh",
"pydicom/filereader.py"
]
| [
"Makefile",
"pydicom/fileutil.py",
"pydicom/util/leanread.py",
"pydicom/misc.py",
"build_tools/circle/build_doc.sh",
"pydicom/filereader.py"
]
|
|
tox-dev__tox-552 | 04c34a61f93f020965e7e0ed298aa951368e561a | 2017-07-14 12:24:53 | e374ce61bf101fb2cc2eddd955f57048df153017 | diff --git a/doc/config.txt b/doc/config.txt
index 1cc6d3d0..2a7a1465 100644
--- a/doc/config.txt
+++ b/doc/config.txt
@@ -220,7 +220,7 @@ Complete list of settings that you can put into ``testenv*`` sections:
* passed through on all platforms: ``PATH``, ``LANG``, ``LANGUAGE``,
``LD_LIBRARY_PATH``, ``PIP_INDEX_URL``
* Windows: ``SYSTEMDRIVE``, ``SYSTEMROOT``, ``PATHEXT``, ``TEMP``, ``TMP``
- ``NUMBER_OF_PROCESSORS``, ``USERPROFILE``
+ ``NUMBER_OF_PROCESSORS``, ``USERPROFILE``, ``MSYSTEM``
* Others (e.g. UNIX, macOS): ``TMPDIR``
You can override these variables with the ``setenv`` option.
diff --git a/tox/config.py b/tox/config.py
index 2f5d52ac..33bf14f2 100755
--- a/tox/config.py
+++ b/tox/config.py
@@ -492,7 +492,8 @@ def tox_addoption(parser):
# for `multiprocessing.cpu_count()` on Windows
# (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
- passenv.add("USERPROFILE") # needed for `os.path.expanduser()`.
+ passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
+ passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
| MSYSTEM is not passed by default on Windows
## Summary
I discovered this when people started reporting breakage when they run `check-manifest` from `tox`, from a Git for Windows `bash.exe` shell: https://github.com/mgedmin/check-manifest/issues/64
When you run `git.exe` on Windows, the way it prints filenames depends on the environment. If `MSYSTEM` is set to `MINGW64`, the filenames are printed in the standard Windows fashion (e.g. `C:\Users\...`). When `MSYSTEM` is not set, filenames are printed in the Unix fashion (`/c/Users/...`).
`check-manifest` runs `git` in a subprocess to enumerate submodules and versioned files. This works fine, except when `tox` clears the environment, things break.
I think MSYSTEM should be included in tox's default `passenv` list, since it describes system configuration (sort of like TERM, or LC_CTYPE) and a wrong value breaks things.
## Steps to reproduce
- get a Windows VM
- install Git for Windows
- install Python
- install tox
- launch Git Bash
- `git clone https://github.com/mgedmin/check-manifest`
- `cd check-manifest`
- `git checkout 0.34` # master has a workaround for this
- `tox --develop -e py27 -- tests:TestGit.test_get_versioned_files_with_git_submodules`
## Expected output
- tox indicates it ran 1 test successfully
## Actual output
- tox shows a test failure where `subprocess.Popen(..., cwd=...)` says the directory name is invalid. | tox-dev/tox | diff --git a/tests/test_config.py b/tests/test_config.py
index 54d11367..3416d0d4 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -876,6 +876,7 @@ class TestConfigTestEnv:
assert "TMP" in envconfig.passenv
assert "NUMBER_OF_PROCESSORS" in envconfig.passenv
assert "USERPROFILE" in envconfig.passenv
+ assert "MSYSTEM" in envconfig.passenv
else:
assert "TMPDIR" in envconfig.passenv
assert "PATH" in envconfig.passenv
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-timeout"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-timeout==2.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tox-dev/tox.git@04c34a61f93f020965e7e0ed298aa951368e561a#egg=tox
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- platformdirs==2.4.0
- pytest-timeout==2.1.0
- virtualenv==20.17.1
prefix: /opt/conda/envs/tox
| [
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]"
]
| [
"tests/test_config.py::TestVenvConfig::test_force_dep_with_url",
"tests/test_config.py::TestIniParser::test_getbool"
]
| [
"tests/test_config.py::TestVenvConfig::test_config_parsing_minimal",
"tests/test_config.py::TestVenvConfig::test_config_parsing_multienv",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions",
"tests/test_config.py::TestVenvConfig::test_force_dep_version",
"tests/test_config.py::TestVenvConfig::test_is_same_dep",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_rex",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]",
"tests/test_config.py::TestConfigPackage::test_defaults",
"tests/test_config.py::TestConfigPackage::test_defaults_distshare",
"tests/test_config.py::TestConfigPackage::test_defaults_changed_dir",
"tests/test_config.py::TestConfigPackage::test_project_paths",
"tests/test_config.py::TestParseconfig::test_search_parents",
"tests/test_config.py::TestParseconfig::test_explicit_config_path",
"tests/test_config.py::test_get_homedir",
"tests/test_config.py::TestGetcontextname::test_blank",
"tests/test_config.py::TestGetcontextname::test_jenkins",
"tests/test_config.py::TestGetcontextname::test_hudson_legacy",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global",
"tests/test_config.py::TestIniParser::test_getstring_single",
"tests/test_config.py::TestIniParser::test_missing_substitution",
"tests/test_config.py::TestIniParser::test_getstring_fallback_sections",
"tests/test_config.py::TestIniParser::test_getstring_substitution",
"tests/test_config.py::TestIniParser::test_getlist",
"tests/test_config.py::TestIniParser::test_getdict",
"tests/test_config.py::TestIniParser::test_getstring_environment_substitution",
"tests/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default",
"tests/test_config.py::TestIniParser::test_value_matches_section_substituion",
"tests/test_config.py::TestIniParser::test_value_doesn_match_section_substitution",
"tests/test_config.py::TestIniParser::test_getstring_other_section_substitution",
"tests/test_config.py::TestIniParser::test_argvlist",
"tests/test_config.py::TestIniParser::test_argvlist_windows_escaping",
"tests/test_config.py::TestIniParser::test_argvlist_multiline",
"tests/test_config.py::TestIniParser::test_argvlist_quoting_in_command",
"tests/test_config.py::TestIniParser::test_argvlist_comment_after_command",
"tests/test_config.py::TestIniParser::test_argvlist_command_contains_hash",
"tests/test_config.py::TestIniParser::test_argvlist_positional_substitution",
"tests/test_config.py::TestIniParser::test_argvlist_quoted_posargs",
"tests/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes",
"tests/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone",
"tests/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310",
"tests/test_config.py::TestIniParser::test_substitution_with_multiple_words",
"tests/test_config.py::TestIniParser::test_getargv",
"tests/test_config.py::TestIniParser::test_getpath",
"tests/test_config.py::TestIniParserPrefix::test_basic_section_access",
"tests/test_config.py::TestIniParserPrefix::test_fallback_sections",
"tests/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substituion",
"tests/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution",
"tests/test_config.py::TestIniParserPrefix::test_other_section_substitution",
"tests/test_config.py::TestConfigTestEnv::test_commentchars_issue33",
"tests/test_config.py::TestConfigTestEnv::test_defaults",
"tests/test_config.py::TestConfigTestEnv::test_sitepackages_switch",
"tests/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop",
"tests/test_config.py::TestConfigTestEnv::test_specific_command_overrides",
"tests/test_config.py::TestConfigTestEnv::test_whitelist_externals",
"tests/test_config.py::TestConfigTestEnv::test_changedir",
"tests/test_config.py::TestConfigTestEnv::test_ignore_errors",
"tests/test_config.py::TestConfigTestEnv::test_envbindir",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_with_factor",
"tests/test_config.py::TestConfigTestEnv::test_passenv_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_changedir_override",
"tests/test_config.py::TestConfigTestEnv::test_install_command_setting",
"tests/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages",
"tests/test_config.py::TestConfigTestEnv::test_install_command_substitutions",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override",
"tests/test_config.py::TestConfigTestEnv::test_simple",
"tests/test_config.py::TestConfigTestEnv::test_substitution_error",
"tests/test_config.py::TestConfigTestEnv::test_substitution_defaults",
"tests/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246",
"tests/test_config.py::TestConfigTestEnv::test_substitution_positional",
"tests/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240",
"tests/test_config.py::TestConfigTestEnv::test_substitution_double",
"tests/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_posargs",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section",
"tests/test_config.py::TestConfigTestEnv::test_multilevel_substitution",
"tests/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails",
"tests/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton",
"tests/test_config.py::TestConfigTestEnv::test_factors",
"tests/test_config.py::TestConfigTestEnv::test_factor_ops",
"tests/test_config.py::TestConfigTestEnv::test_default_factors",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_boolean",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_setenv",
"tests/test_config.py::TestConfigTestEnv::test_factor_use_not_checked",
"tests/test_config.py::TestConfigTestEnv::test_factors_groups_touch",
"tests/test_config.py::TestConfigTestEnv::test_period_in_factor",
"tests/test_config.py::TestConfigTestEnv::test_ignore_outcome",
"tests/test_config.py::TestGlobalOptions::test_notest",
"tests/test_config.py::TestGlobalOptions::test_verbosity",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_default",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_context",
"tests/test_config.py::TestGlobalOptions::test_sdist_specification",
"tests/test_config.py::TestGlobalOptions::test_env_selection",
"tests/test_config.py::TestGlobalOptions::test_py_venv",
"tests/test_config.py::TestGlobalOptions::test_default_environments",
"tests/test_config.py::TestGlobalOptions::test_envlist_expansion",
"tests/test_config.py::TestGlobalOptions::test_envlist_cross_product",
"tests/test_config.py::TestGlobalOptions::test_envlist_multiline",
"tests/test_config.py::TestGlobalOptions::test_minversion",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_commandline",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_partial_override",
"tests/test_config.py::TestHashseedOption::test_default",
"tests/test_config.py::TestHashseedOption::test_passing_integer",
"tests/test_config.py::TestHashseedOption::test_passing_string",
"tests/test_config.py::TestHashseedOption::test_passing_empty_string",
"tests/test_config.py::TestHashseedOption::test_setenv",
"tests/test_config.py::TestHashseedOption::test_noset",
"tests/test_config.py::TestHashseedOption::test_noset_with_setenv",
"tests/test_config.py::TestHashseedOption::test_one_random_hashseed",
"tests/test_config.py::TestHashseedOption::test_setenv_in_one_testenv",
"tests/test_config.py::TestSetenv::test_getdict_lazy",
"tests/test_config.py::TestSetenv::test_getdict_lazy_update",
"tests/test_config.py::TestSetenv::test_setenv_uses_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_default_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_uses_other_setenv",
"tests/test_config.py::TestSetenv::test_setenv_recursive_direct",
"tests/test_config.py::TestSetenv::test_setenv_overrides",
"tests/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython",
"tests/test_config.py::TestSetenv::test_setenv_ordering_1",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_mixed",
"tests/test_config.py::TestIndexServer::test_indexserver",
"tests/test_config.py::TestIndexServer::test_parse_indexserver",
"tests/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]",
"tests/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex",
"tests/test_config.py::TestParseEnv::test_parse_recreate",
"tests/test_config.py::TestCmdInvocation::test_help",
"tests/test_config.py::TestCmdInvocation::test_version",
"tests/test_config.py::TestCmdInvocation::test_listenvs",
"tests/test_config.py::TestCmdInvocation::test_listenvs_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description_no_additional_environments",
"tests/test_config.py::TestCmdInvocation::test_config_specific_ini",
"tests/test_config.py::TestCmdInvocation::test_no_tox_ini",
"tests/test_config.py::TestCmdInvocation::test_override_workdir",
"tests/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version",
"tests/test_config.py::test_env_spec[-e",
"tests/test_config.py::TestCommandParser::test_command_parser_for_word",
"tests/test_config.py::TestCommandParser::test_command_parser_for_posargs",
"tests/test_config.py::TestCommandParser::test_command_parser_for_multiple_words",
"tests/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces",
"tests/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set",
"tests/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace",
"tests/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments",
"tests/test_config.py::TestCommandParser::test_command_parsing_for_issue_10"
]
| []
| MIT License | 1,462 | [
"doc/config.txt",
"tox/config.py"
]
| [
"doc/config.txt",
"tox/config.py"
]
|
|
asottile__add-trailing-comma-12 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | 2017-07-14 15:44:54 | 49a0d757435b4962c58f8d4f48ba85c7f2f5256f | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 7a571a7..882cb9d 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -7,6 +7,7 @@ import collections
import io
import sys
+from tokenize_rt import ESCAPED_NL
from tokenize_rt import src_to_tokens
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
@@ -20,8 +21,8 @@ Literal = collections.namedtuple('Literal', ('node', 'backtrack'))
Literal.__new__.__defaults__ = (False,)
Fix = collections.namedtuple('Fix', ('braces', 'multi_arg', 'initial_indent'))
-NEWLINES = frozenset(('NEWLINE', 'NL'))
-NON_CODING_TOKENS = frozenset(('COMMENT', 'NL', UNIMPORTANT_WS))
+NEWLINES = frozenset((ESCAPED_NL, 'NEWLINE', 'NL'))
+NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
INDENT_TOKENS = frozenset(('INDENT', UNIMPORTANT_WS))
START_BRACES = frozenset(('(', '{', '['))
END_BRACES = frozenset((')', '}', ']'))
diff --git a/setup.py b/setup.py
index 39c86ff..828d6a9 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ setup(
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
- install_requires=['tokenize-rt'],
+ install_requires=['tokenize-rt>=2'],
py_modules=['add_trailing_comma'],
entry_points={
'console_scripts': ['add-trailing-comma = add_trailing_comma:main'],
| escaped newlines are throwing off indent detection
This should be a noop:
```python
x = y.\
foo(
bar,
)
```
However, this is the current behaviour:
```diff
x = y.\
foo(
bar,
- )
+)
```
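For context, here is a minimal sketch of what the tokenizer reports for that input; it assumes tokenize-rt>=2, which emits a dedicated `ESCAPED_NL` token for the backslash continuation (plain `tokenize` simply swallows it), and it is not the project's own code:
```python
# Illustrative only: show that the backslash continuation surfaces as an
# ESCAPED_NL token, which indent detection must treat like 'NL'/'NEWLINE'
# when walking back to find the indentation of the opening line.
from tokenize_rt import ESCAPED_NL, src_to_tokens

src = 'x = y.\\\n    foo(\n        bar,\n    )\n'
names = [token.name for token in src_to_tokens(src)]
assert ESCAPED_NL in names
```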
Might need help from https://github.com/asottile/tokenize-rt/issues/1 | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 9f67fb3..d41af5c 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -501,6 +501,11 @@ def test_fix_unhugs_py3_only(src, expected):
' 1, 2, 3, 4,\n'
' ],\n'
']',
+ # Regression test for #11
+ 'foo.\\\n'
+ ' bar(\n'
+ ' 5,\n'
+ ' )',
),
)
def test_noop_trailing_brace(src):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@49a0d757435b4962c58f8d4f48ba85c7f2f5256f#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n"
]
| []
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,463 | [
"setup.py",
"add_trailing_comma.py"
]
| [
"setup.py",
"add_trailing_comma.py"
]
|
|
pydicom__pydicom-411 | db94409999965965a0e73b53db5d89dfc3707e47 | 2017-07-14 17:05:37 | bef49851e7c3b70edd43cc40fc84fe905e78d5ba | pep8speaks: Hello @mrbean-bremen! Thanks for submitting the PR.
- In the file [`pydicom/filereader.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py), following are the PEP8 issues :
> [Line 31:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L31): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 32:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L32): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 32:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L32): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 33:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L33): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 34:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L34): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 35:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L35): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 36:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L36): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 37:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L37): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 38:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L38): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 39:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L39): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 40:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L40): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 41:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L41): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 42:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L42): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 115:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L115): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (96 > 79 characters)
> [Line 121:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L121): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 240:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L240): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 242:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L242): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (86 > 79 characters)
> [Line 255:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L255): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 261:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L261): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (93 > 79 characters)
> [Line 262:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L262): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 281:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L281): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 303:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L303): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 306:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L306): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (97 > 79 characters)
> [Line 307:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L307): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (98 > 79 characters)
> [Line 315:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L315): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 397:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L397): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 407:5](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L407): [E722](https://duckduckgo.com/?q=pep8%20E722) do not use bare except'
> [Line 412:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L412): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (86 > 79 characters)
> [Line 415:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L415): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 440:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L440): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 502:25](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L502): [E122](https://duckduckgo.com/?q=pep8%20E122) continuation line missing indentation or outdented
> [Line 503:25](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L503): [E122](https://duckduckgo.com/?q=pep8%20E122) continuation line missing indentation or outdented
> [Line 522:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L522): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 561:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L561): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 608:5](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L608): [E266](https://duckduckgo.com/?q=pep8%20E266) too many leading '#' for block comment
> [Line 614:5](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L614): [E266](https://duckduckgo.com/?q=pep8%20E266) too many leading '#' for block comment
> [Line 625:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L625): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 707:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L707): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 824:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L824): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 875:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L875): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 878:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/filereader.py#L878): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
- In the file [`pydicom/fileutil.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py), following are the PEP8 issues :
> [Line 26:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L26): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 27:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L27): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 31:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L31): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (96 > 79 characters)
> [Line 35:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L35): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (100 > 79 characters)
> [Line 66:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L66): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 79:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L79): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (107 > 79 characters)
> [Line 89:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L89): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 91:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L91): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 93:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L93): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 137:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L137): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 152:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L152): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (93 > 79 characters)
> [Line 156:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L156): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (100 > 79 characters)
> [Line 158:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L158): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (107 > 79 characters)
> [Line 171:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L171): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 192:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L192): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 196:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L196): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (92 > 79 characters)
> [Line 197:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L197): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 217:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L217): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 221:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L221): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 233:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L233): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (117 > 79 characters)
> [Line 236:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/fileutil.py#L236): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (116 > 79 characters)
- In the file [`pydicom/misc.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/misc.py), following are the PEP8 issues :
> [Line 25:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/misc.py#L25): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 31:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/misc.py#L31): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
- In the file [`pydicom/util/leanread.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py), following are the PEP8 issues :
> [Line 21:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L21): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 50:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L50): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 61:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L61): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 66:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L66): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 171:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L171): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 185:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/pydicom/util/leanread.py#L185): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (103 > 79 characters)
- In the file [`tests/test_filereader.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py), following are the PEP8 issues :
> [Line 32:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L32): [E722](https://duckduckgo.com/?q=pep8%20E722) do not use bare except'
> [Line 38:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L38): [E722](https://duckduckgo.com/?q=pep8%20E722) do not use bare except'
> [Line 41:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L41): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 42:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L42): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 43:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L43): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 44:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L44): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 45:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L45): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 46:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L46): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 47:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L47): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 48:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L48): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 49:1](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L49): [E402](https://duckduckgo.com/?q=pep8%20E402) module level import not at top of file
> [Line 72:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L72): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 80:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L80): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 87:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L87): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 96:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L96): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 97:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L97): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 107:5](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L107): [E722](https://duckduckgo.com/?q=pep8%20E722) do not use bare except'
> [Line 120:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L120): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (112 > 79 characters)
> [Line 138:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L138): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 141:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L141): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (105 > 79 characters)
> [Line 143:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L143): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 145:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L145): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (101 > 79 characters)
> [Line 147:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L147): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 151:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L151): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 157:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L157): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 162:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L162): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (97 > 79 characters)
> [Line 164:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L164): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 165:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L165): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 170:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L170): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 172:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L172): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 176:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L176): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (98 > 79 characters)
> [Line 177:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L177): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (104 > 79 characters)
> [Line 181:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L181): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 187:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L187): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (92 > 79 characters)
> [Line 197:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L197): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 201:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L201): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (95 > 79 characters)
> [Line 205:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L205): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 209:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L209): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 210:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L210): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 233:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L233): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (95 > 79 characters)
> [Line 241:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L241): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 249:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L249): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 252:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L252): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 256:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L256): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 258:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L258): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 265:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L265): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (100 > 79 characters)
> [Line 266:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L266): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (116 > 79 characters)
> [Line 270:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L270): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (146 > 79 characters)
> [Line 273:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L273): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 279:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L279): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (98 > 79 characters)
> [Line 285:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L285): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 286:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L286): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 287:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L287): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 294:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L294): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 306:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L306): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (91 > 79 characters)
> [Line 331:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L331): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 338:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L338): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (149 > 79 characters)
> [Line 341:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L341): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (86 > 79 characters)
> [Line 344:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L344): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 350:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L350): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 353:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L353): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 359:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L359): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (125 > 79 characters)
> [Line 362:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L362): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (84 > 79 characters)
> [Line 368:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L368): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (125 > 79 characters)
> [Line 410:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L410): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (91 > 79 characters)
> [Line 425:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L425): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 426:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L426): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 427:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L427): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 428:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L428): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 434:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L434): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 440:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L440): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 441:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L441): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 442:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L442): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 451:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L451): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 457:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L457): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 459:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L459): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 460:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L460): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 461:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L461): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 467:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L467): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (93 > 79 characters)
> [Line 470:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L470): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 486:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L486): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 487:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L487): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 488:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L488): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 502:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L502): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 503:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L503): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 504:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L504): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (88 > 79 characters)
> [Line 535:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L535): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 536:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L536): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 537:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L537): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (94 > 79 characters)
> [Line 573:70](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L573): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 573:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L573): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 575:49](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L575): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 576:58](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L576): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 576:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L576): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 577:38](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L577): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 578:44](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L578): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 579:44](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L579): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 580:46](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L580): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 582:28](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L582): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 588:31](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L588): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 598:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L598): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 598:82](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L598): [E502](https://duckduckgo.com/?q=pep8%20E502) the backslash is redundant between brackets
> [Line 599:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L599): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 606:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L606): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 606:82](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L606): [E502](https://duckduckgo.com/?q=pep8%20E502) the backslash is redundant between brackets
> [Line 607:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L607): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 614:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L614): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 614:82](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L614): [E502](https://duckduckgo.com/?q=pep8%20E502) the backslash is redundant between brackets
> [Line 622:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L622): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (82 > 79 characters)
> [Line 622:82](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L622): [E502](https://duckduckgo.com/?q=pep8%20E502) the backslash is redundant between brackets
> [Line 653:38](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L653): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 658:38](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L658): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 665:38](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L665): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 670:38](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L670): [E261](https://duckduckgo.com/?q=pep8%20E261) at least two spaces before inline comment
> [Line 693:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L693): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (106 > 79 characters)
> [Line 695:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L695): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (90 > 79 characters)
> [Line 703:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L703): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (106 > 79 characters)
> [Line 705:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L705): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (95 > 79 characters)
> [Line 719:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L719): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (117 > 79 characters)
> [Line 721:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L721): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (81 > 79 characters)
> [Line 733:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L733): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 734:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L734): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (110 > 79 characters)
> [Line 736:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L736): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (120 > 79 characters)
> [Line 740:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L740): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (107 > 79 characters)
> [Line 748:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L748): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (106 > 79 characters)
> [Line 750:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L750): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (80 > 79 characters)
> [Line 758:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L758): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (106 > 79 characters)
> [Line 760:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L760): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (95 > 79 characters)
> [Line 770:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L770): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 773:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L773): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (110 > 79 characters)
> [Line 776:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L776): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 790:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L790): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 798:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L798): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 799:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L799): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (92 > 79 characters)
> [Line 801:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L801): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (113 > 79 characters)
> [Line 804:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L804): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 862:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L862): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (103 > 79 characters)
> [Line 870:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L870): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (85 > 79 characters)
> [Line 883:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L883): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 885:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L885): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 895:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L895): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (111 > 79 characters)
> [Line 900:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L900): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 905:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L905): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (87 > 79 characters)
> [Line 909:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L909): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 910:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L910): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 914:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L914): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (98 > 79 characters)
> [Line 915:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L915): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (104 > 79 characters)
> [Line 918:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L918): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 922:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L922): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (92 > 79 characters)
> [Line 923:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L923): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 935:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L935): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (89 > 79 characters)
> [Line 936:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L936): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (92 > 79 characters)
> [Line 937:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L937): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (83 > 79 characters)
> [Line 943:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_filereader.py#L943): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (90 > 79 characters)
- In the file [`tests/test_misc.py`](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_misc.py), the following are the PEP8 issues:
> [Line 8:80](https://github.com/pydicom/pydicom/blob/6e4b424d65c03e7b443778b2881b9d528d365bc5/tests/test_misc.py#L8): [E501](https://duckduckgo.com/?q=pep8%20E501) line too long (86 > 79 characters)
mrbean-bremen: Found the most trivial bug to get my feet wet... and got beaten by pep8speaks. What is the policy now, should each file we touch be made PEP-8 conformant?
vsoch: I think it's pretty annoying yes, but I also think the code will be much improved, readability wise, if we make the suggested changes.
mrbean-bremen: That's ok, just wanted to be sure that this is what we want :)
I also didn't add pep8speaks to my fork - will do that now.
mrbean-bremen: CircleCI fails with
> make: *** No rule to make target `html-noplot'. Stop.
I have no experience with CircleCI - does anyone have an idea?
vsoch: Just looking at the Makefile for docs on current master:
```
# simple makefile to simplify repetitive build env management tasks under posix
# caution: testing won't work on windows

test-code:
	py.test tests

test-doc:
	pytest doc/*.rst

test-coverage:
	rm -rf coverage .coverage
	py.test tests --cov-report term-missing --cov=pydicom

test: test-code test-doc

doc:
	make -C doc html

doc-noplot:
	make -C doc html-noplot

clean:
	find . -name "*.so" -o -name "*.pyc" -o -name "*.md5" -o -name "*.pyd" -o -name "*~" | xargs rm -f
	find . -name "*.pyx" -exec ./tools/rm_pyx_c_file.sh {} \;
	rm -rf .cache
	rm -rf .coverage
	rm -rf dist
	rm -rf build
	rm -rf doc/auto_examples
	rm -rf doc/generated
	rm -rf doc/modules
	rm -rf examples/.ipynb_checkpoints

code-analysis:
	flake8 pydicom | grep -v __init__ | grep -v external
	pylint -E -i y pydicom/ -d E1103,E0611,E1101
```
I would guess it's a typo, should it be something like:
```
make html doc-noplot
```
vsoch: oh just kidding, I was in the wrong folder, that's for pydicom :)
vsoch: make html would work for the docs, but if a special rule to build them without plots was intended, I don't think that exists?
darcymason: > I think it's pretty annoying yes, but I also think the code will be much improved, readability wise, if we make the suggested changes.
+1. But it would be nice to try to keep PEP8 changes separate so that code is easier to review for 'real' changes -- I think it has been mentioned before, to make a concerted effort to get all the pep8 done. Or perhaps any new commits can tackle pep8 first, then follow with separate commits for other changes.
mrbean-bremen: Ah, yes, you may be right - I just noticed that @massich created #408 for that.
mrbean-bremen: @darcymason - yes, I also thought about that. In my case, I made the "real" commit first, and the PEP8 commits afterwards, so they still can be reviewed separately, though it's not ideal...
vsoch: The failing call comes from a build script under CircleCI, specifically:
```
# Build and install scikit-learn in dev mode
python setup.py develop
# The pipefail is requested to propagate exit code
set -o pipefail && cd doc && make $MAKE_TARGET 2>&1 | tee ~/log.txt
cd -
set +o pipefail
```
so the `$MAKE_TARGET` value `html-noplot` just needs to be a target that actually exists, I think.
vsoch: here is the bottom of the log (with the failure), in case others want to see it:
```
Stored in directory: /home/ubuntu/.cache/pip/wheels/b8/09/cf/9801757ff42c85e23678e2520f7dee38f3fdd6455f7550f676
Successfully built sphinx-gallery
Installing collected packages: sphinx-gallery
Successfully installed sphinx-gallery-0.1.11
+ python setup.py develop
running develop
running egg_info
creating pydicom.egg-info
writing pydicom.egg-info/PKG-INFO
writing dependency_links to pydicom.egg-info/dependency_links.txt
writing top-level names to pydicom.egg-info/top_level.txt
writing manifest file 'pydicom.egg-info/SOURCES.txt'
reading manifest file 'pydicom.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
writing manifest file 'pydicom.egg-info/SOURCES.txt'
running build_ext
Creating /home/ubuntu/miniconda/envs/testenv/lib/python3.6/site-packages/pydicom.egg-link (link to .)
Adding pydicom 1.0.0a1 to easy-install.pth file
Installed /home/ubuntu/pydicom
Processing dependencies for pydicom==1.0.0a1
Finished processing dependencies for pydicom==1.0.0a1
+ set -o pipefail
+ cd doc
+ tee /home/ubuntu/log.txt
+ make html-noplot
make: *** No rule to make target `html-noplot'. Stop.
./build_tools/circle/build_doc.sh returned exit code 2
Action failed: ./build_tools/circle/build_doc.sh
```
mrbean-bremen: Hopefully, with more files touched, this problem will go away eventually.
Somewhat related: what is the policy for PRs regarding commits? I usually work in my branch first, and before making a PR I do a git rebase to sanitize the number of commits and the commit messages (I didn't do this in this case, because I forgot about PEP8).
mrbean-bremen: @vsoch - I think we should move these findings to #408.
darcymason: > what is the policy for PRs regarding commits? I usually first work in my branch, and before making a PR I make a git rebase to sanitize the number of commits and the commit messages
I've been using "squash and merge" for most things. I'm barely above novice in terms of git's abilities, however, so anyone can educate me on how to do these things. I could see doing rebase occasionally if there are only a few commits and we want to see the history more directly. But with either, rebasing before the PR should not matter, if I understand correctly, unless you want to edit the messages.
vsoch: +1 squash and merge ;)
mrbean-bremen: I'm ok with squash and merge - I only asked because that would not work if we want to separate the PEP8 commit from the interesting ones. In this case it would make sense to rebase to 2 commits before making the PR, and then rebase-merge the commit.
And I'm also fairly new to git - I only used svn (and cvs) before in my day job.
darcymason: > it would make sense to rebase to 2 commits before making the PR, and then rebase-merge the commit.
That sounds perfect. If ever going back to track something down in the history, it would be a lot cleaner to be able to skip over the mostly cosmetic pep8 changes.
mrbean-bremen: Ok, so in this case I can just rebase the current PR to have only 2 commits - I think nobody will mind.
mrbean-bremen: BTW - do you think it makes sense to handle PEP8 separately? E.g. make one or a couple of separate PRs just for the cleanup, so we are done with it? This is something anybody (like myself) could do; we just have to make sure there are no conflicts with ongoing work.
vsoch: @mrbean-bremen yeah! Let me give a go at this later today. I kind of weirdly like that kind of work :)
mrbean-bremen: Well, in that case it's yours :) You just have to clarify with others working on the same code, and with our BDFL :P
darcymason: Can someone advise on how to pull this in cleanly? If I simply rebase and merge on github now, are the pep8 changes separated out? I know I could see this more easily by pulling to a local branch, but I'm hoping to do it in the github interface here.
mrbean-bremen: Yes, they are - a "Rebase and Merge" will leave the 2 commits separate.
darcymason: Okay, I've reviewed it. In the interest of getting the pep8 changes in, I'll merge with just my MRG+1. | diff --git a/pydicom/filereader.py b/pydicom/filereader.py
index 8735955f7..aa575f5a4 100644
--- a/pydicom/filereader.py
+++ b/pydicom/filereader.py
@@ -1,16 +1,17 @@
-# filereader.py
"""Read a dicom media file"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from __future__ import absolute_import
+
# Need zlib and io.BytesIO for deflate-compressed file
import os.path
import warnings
import zlib
from io import BytesIO
+from pydicom.misc import size_in_bytes
from pydicom.tag import TupleTag
from pydicom.dataelem import RawDataElement
from pydicom.util.hexutil import bytes2hex
@@ -20,15 +21,8 @@ from pydicom.compat import in_py2
from pydicom import compat
from pydicom import config # don't import datetime_conversion directly
from pydicom.config import logger
-
-stat_available = True
-try:
- from os import stat
-except ImportError:
- stat_available = False
-
from pydicom.errors import InvalidDicomError
-import pydicom.uid # for Implicit/Explicit/Little/Big Endian transfer syntax UIDs
+import pydicom.uid # for transfer syntax UIDs
from pydicom.filebase import DicomFile
from pydicom.dataset import Dataset, FileDataset
from pydicom.dicomdir import DicomDir
@@ -39,12 +33,19 @@ from pydicom.sequence import Sequence
from pydicom.fileutil import read_undefined_length_value
from struct import Struct, unpack
from sys import byteorder
+
+try:
+ from os import stat
+except ImportError:
+ stat = None
+
sys_is_little_endian = (byteorder == 'little')
class DicomIter(object):
"""Iterator over DICOM data elements created from a file-like object
"""
+
def __init__(self, fp, stop_when=None, force=False):
"""Read the preamble and meta info and prepare iterator for remainder of file.
@@ -109,13 +110,15 @@ class DicomIter(object):
yield self.file_meta_info[tag]
for data_element in data_element_generator(self.fp,
- self._is_implicit_VR, self._is_little_endian,
+ self._is_implicit_VR,
+ self._is_little_endian,
stop_when=self.stop_when):
yield data_element
def data_element_generator(fp, is_implicit_VR, is_little_endian,
- stop_when=None, defer_size=None, encoding=default_encoding):
+ stop_when=None, defer_size=None,
+ encoding=default_encoding):
"""Create a generator to efficiently return the raw data elements.
Parameters
@@ -182,6 +185,7 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
logger_debug = logger.debug
debugging = config.debugging
element_struct_unpack = element_struct.unpack
+ defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
@@ -233,9 +237,10 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
# Reading the value
# First case (most common): reading a value with a defined length
if length != 0xFFFFFFFF:
- # don't defer loading of Specific Character Set value as it is needed
- # immediately to get the character encoding for other tags
- if defer_size is not None and length > defer_size and tag != (0x08, 0x05):
+ # don't defer loading of Specific Character Set value as it is
+ # needed immediately to get the character encoding for other tags
+ if defer_size is not None and length > defer_size and tag != (
+ 0x08, 0x05):
# Flag as deferred by setting value to None, and skip bytes
value = None
logger_debug("Defer size exceeded. "
@@ -248,13 +253,18 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
if length > 12:
dotdot = "..."
logger_debug("%08x: %-34s %s %r %s" % (value_tell,
- bytes2hex(value[:12]), dotdot, value[:12], dotdot))
+ bytes2hex(
+ value[:12]),
+ dotdot,
+ value[:12], dotdot))
# If the tag is (0008,0005) Specific Character Set, then store it
if tag == (0x08, 0x05):
from pydicom.values import convert_string
- encoding = convert_string(value, is_little_endian, encoding=default_encoding)
- # Store the encoding value in the generator for use with future elements (SQs)
+ encoding = convert_string(value, is_little_endian,
+ encoding=default_encoding)
+ # Store the encoding value in the generator
+ # for use with future elements (SQs)
encoding = convert_encodings(encoding)
yield RawDataElement(tag, VR, length, value, value_tell,
@@ -273,7 +283,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
try:
VR = dictionary_VR(tag)
except KeyError:
- # Look ahead to see if it consists of items and is thus a SQ
+ # Look ahead to see if it consists of items
+ # and is thus a SQ
next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
# Rewind the file
fp.seek(fp_tell() - 4)
@@ -295,11 +306,14 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
value = read_undefined_length_value(fp, is_little_endian,
delimiter, defer_size)
- # If the tag is (0008,0005) Specific Character Set, then store it
+ # If the tag is (0008,0005) Specific Character Set,
+ # then store it
if tag == (0x08, 0x05):
from pydicom.values import convert_string
- encoding = convert_string(value, is_little_endian, encoding=default_encoding)
- # Store the encoding value in the generator for use with future elements (SQs)
+ encoding = convert_string(value, is_little_endian,
+ encoding=default_encoding)
+ # Store the encoding value in the generator for use
+ # with future elements (SQs)
encoding = convert_encodings(encoding)
yield RawDataElement(tag, VR, length, value, value_tell,
@@ -307,7 +321,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
def read_dataset(fp, is_implicit_VR, is_little_endian, bytelength=None,
- stop_when=None, defer_size=None, parent_encoding=default_encoding):
+ stop_when=None, defer_size=None,
+ parent_encoding=default_encoding):
"""Return a Dataset instance containing the next dataset in the file.
Parameters
@@ -389,7 +404,8 @@ def read_sequence(fp, is_implicit_VR, is_little_endian, bytelength, encoding,
return seq
-def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0):
+def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding,
+ offset=0):
"""Read and return a single sequence item, i.e. a Dataset"""
seq_item_tell = fp.tell() + offset
if is_little_endian:
@@ -399,15 +415,17 @@ def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0)
try:
bytes_read = fp.read(8)
group, element, length = unpack(tag_length_format, bytes_read)
- except:
+ except BaseException:
raise IOError("No tag to read at file position "
"{0:05x}".format(fp.tell() + offset))
tag = (group, element)
if tag == SequenceDelimiterTag: # No more items, time to stop reading
- logger.debug("{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence"))
+ logger.debug(
+ "{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence"))
if length != 0:
logger.warning("Expected 0x00000000 after delimiter, found 0x%x, "
- "at position 0x%x" % (length, fp.tell() - 4 + offset))
+ "at position 0x%x" % (
+ length, fp.tell() - 4 + offset))
return None
if tag != ItemTag:
logger.warning("Expected sequence item with tag %s at file position "
@@ -429,11 +447,12 @@ def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding, offset=0)
def _read_command_set_elements(fp):
- """Return a Dataset containing any Command Set (0000,eeee) elements in `fp`.
+ """Return a Dataset containing any Command Set (0000,eeee) elements
+ in `fp`.
Command Set elements are always Implicit VR Little Endian (as per PS3.7
- Section 6.3). Once any Command Set elements are read `fp` will be positioned
- at the start of the next group of elements.
+ Section 6.3). Once any Command Set elements are read `fp` will be
+ positioned at the start of the next group of elements.
Parameters
----------
@@ -446,6 +465,7 @@ def _read_command_set_elements(fp):
The command set elements as a Dataset instance. May be empty if no
command set elements are present.
"""
+
def _not_group_0000(tag, VR, length):
"""Return True if the tag is not in group 0x0000, False otherwise."""
return (tag.group != 0)
@@ -474,9 +494,10 @@ def _read_file_meta_info(fp):
The File Meta elements as a Dataset instance. May be empty if no
File Meta are present.
"""
+
def _not_group_0002(tag, VR, length):
"""Return True if the tag is not in group 0x0002, False otherwise."""
- return (tag.group != 2)
+ return tag.group != 2
start_file_meta = fp.tell()
file_meta = read_dataset(fp, is_implicit_VR=False, is_little_endian=True,
@@ -491,9 +512,9 @@ def _read_file_meta_info(fp):
logger.info("_read_file_meta_info: (0002,0000) 'File Meta "
"Information Group Length' value doesn't match the "
"actual File Meta Information length ({0} vs {1} "
- "bytes).".format(
- file_meta.FileMetaInformationGroupLength,
- length_file_meta))
+ "bytes)."
+ .format(file_meta.FileMetaInformationGroupLength,
+ length_file_meta))
return file_meta
@@ -512,11 +533,11 @@ def read_file_meta_info(filename):
def read_preamble(fp, force):
"""Return the 128-byte DICOM preamble in `fp` if present.
- `fp` should be positioned at the start of the file-like. If the preamble and
- prefix are found then after reading `fp` will be positioned at the first
- byte after the prefix (byte offset 133). If either the preamble or prefix
- are missing and `force` is True then after reading `fp` will be positioned
- at the start of the file-like.
+ `fp` should be positioned at the start of the file-like. If the preamble
+ and prefix are found then after reading `fp` will be positioned at the
+ first byte after the prefix (byte offset 133). If either the preamble or
+ prefix are missing and `force` is True then after reading `fp` will be
+ positioned at the start of the file-like.
Parameters
----------
@@ -551,10 +572,11 @@ def read_preamble(fp, force):
logger.debug("Reading File Meta Information prefix...")
magic = fp.read(4)
if magic != b"DICM" and force:
- logger.info("File is not conformant with the DICOM File Format: 'DICM' "
- "prefix is missing from the File Meta Information header "
- "or the header itself is missing. Assuming no header and "
- "continuing.")
+ logger.info(
+ "File is not conformant with the DICOM File Format: 'DICM' "
+ "prefix is missing from the File Meta Information header "
+ "or the header itself is missing. Assuming no header and "
+ "continuing.")
preamble = None
fp.seek(0)
elif magic != b"DICM" and not force:
@@ -598,13 +620,15 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
read_file
More generic file reading function.
"""
- ## Read File Meta Information
+ # Read File Meta Information
+
# Read preamble (if present)
preamble = read_preamble(fileobj, force)
# Read any File Meta Information group (0002,eeee) elements (if present)
file_meta_dataset = _read_file_meta_info(fileobj)
- ## Read Dataset
+ # Read Dataset
+
# Read any Command Set group (0000,eeee) elements (if present)
command_set = _read_command_set_elements(fileobj)
@@ -612,14 +636,14 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
peek = fileobj.read(1)
fileobj.seek(-1, 1)
- # `filobj` should be positioned at the start of the dataset by this point,
+ # `filobj` should be positioned at the start of the dataset by this point.
# Ensure we have appropriate values for `is_implicit_VR` and
- # `is_little_endian` before we try decoding. We assume an initial
- # transfer syntax of implicit VR little endian and correct it as necessary
+ # `is_little_endian` before we try decoding. We assume an initial
+ # transfer syntax of implicit VR little endian and correct it as necessary
is_implicit_VR = True
is_little_endian = True
transfer_syntax = file_meta_dataset.get("TransferSyntaxUID")
- if peek == b'': # EOF
+ if peek == b'': # EOF
pass
elif transfer_syntax is None: # issue 258
# If no TransferSyntaxUID element then we have to try and figure out
@@ -693,11 +717,12 @@ def read_partial(fileobj, stop_when=None, defer_size=None, force=False):
def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
"""Read and parse a DICOM dataset stored in the DICOM File Format.
- Read a DICOM dataset stored in accordance with the DICOM File Format (DICOM
- Standard Part 10 Section 7). If the dataset is not stored in accordance
- with the File Format (i.e. the preamble and prefix are missing, there are
- missing required Type 1 File Meta Information Group elements or the entire
- File Meta Information is missing) then you will have to set `force` to True.
+ Read a DICOM dataset stored in accordance with the DICOM File Format
+ (DICOM Standard Part 10 Section 7). If the dataset is not stored in
+ accordance with the File Format (i.e. the preamble and prefix are missing,
+ there are missing required Type 1 File Meta Information Group elements
+ or the entire File Meta Information is missing) then you will have to
+ set `force` to True.
Parameters
----------
@@ -772,10 +797,8 @@ def read_file(fp, defer_size=None, stop_before_pixels=False, force=False):
logger.debug("Caller passed file name")
logger.debug("-" * 80)
- # Convert size to defer reading into bytes, and store in file object
- # if defer_size is not None:
- # defer_size = size_in_bytes(defer_size)
- # fp.defer_size = defer_size
+ # Convert size to defer reading into bytes
+ defer_size = size_in_bytes(defer_size)
# Iterate through all items and store them --include file meta if present
stop_when = None
@@ -816,7 +839,8 @@ def read_dicomdir(filename="DICOMDIR"):
ds = read_file(filename)
# Here, check that it is in fact DicomDir
if not isinstance(ds, DicomDir):
- msg = u"File '{0}' is not a Media Storage Directory file".format(filename)
+ msg = u"File '{0}' is not a Media Storage Directory file".format(
+ filename)
raise InvalidDicomError(msg)
return ds
@@ -824,7 +848,7 @@ def read_dicomdir(filename="DICOMDIR"):
def data_element_offset_to_value(is_implicit_VR, VR):
"""Return number of bytes from start of data element to start of value"""
if is_implicit_VR:
- offset = 8 # tag of 4 plus 4-byte length
+ offset = 8 # tag of 4 plus 4-byte length
else:
if VR in extra_length_VRs:
offset = 12 # tag 4 + 2 VR + 2 reserved + 4 length
@@ -846,7 +870,7 @@ def read_deferred_data_element(fileobj_type, filename, timestamp,
if not os.path.exists(filename):
raise IOError(u"Deferred read -- original file "
"{0:s} is missing".format(filename))
- if stat_available and (timestamp is not None):
+ if stat is not None and (timestamp is not None):
statinfo = os.stat(filename)
if statinfo.st_mtime != timestamp:
warnings.warn("Deferred read warning -- file modification time "
@@ -867,10 +891,12 @@ def read_deferred_data_element(fileobj_type, filename, timestamp,
fp.close()
if data_elem.VR != raw_data_elem.VR:
raise ValueError("Deferred read VR {0:s} does not match "
- "original {1:s}".format(data_elem.VR, raw_data_elem.VR))
+ "original {1:s}".format(data_elem.VR,
+ raw_data_elem.VR))
if data_elem.tag != raw_data_elem.tag:
raise ValueError("Deferred read tag {0!r} does not match "
- "original {1!r}".format(data_elem.tag, raw_data_elem.tag))
+ "original {1!r}".format(data_elem.tag,
+ raw_data_elem.tag))
# Everything is ok, now this object should act like usual DataElement
return data_elem
diff --git a/pydicom/fileutil.py b/pydicom/fileutil.py
index 146fa8ebb..e3eab5c5d 100644
--- a/pydicom/fileutil.py
+++ b/pydicom/fileutil.py
@@ -1,11 +1,12 @@
-# fileutil.py
-"""Functions for reading to certain bytes, e.g. delimiters"""
+"""Functions for reading to certain bytes, e.g. delimiters."""
# Copyright (c) 2009-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from struct import pack, unpack
+
+from pydicom.misc import size_in_bytes
from pydicom.tag import TupleTag, Tag
from pydicom.datadict import dictionary_description
@@ -21,16 +22,20 @@ def absorb_delimiter_item(fp, is_little_endian, delimiter):
group, elem, length = unpack(struct_format, fp.read(8))
tag = TupleTag((group, elem))
if tag != delimiter:
- msg = "Did not find expected delimiter '%s'" % dictionary_description(delimiter)
- msg += ", instead found %s at file position 0x%x" % (str(tag), fp.tell() - 8)
+ msg = ("Did not find expected delimiter '%s'" %
+ dictionary_description(delimiter))
+ msg += ", instead found %s at file position 0x%x" % (
+ str(tag), fp.tell() - 8)
logger.warn(msg)
fp.seek(fp.tell() - 8)
return
- logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8, dictionary_description(delimiter))
+ logger.debug("%04x: Found Delimiter '%s'", fp.tell() - 8,
+ dictionary_description(delimiter))
if length == 0:
logger.debug("%04x: Read 0 bytes after delimiter", fp.tell() - 4)
else:
- logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x", fp.tell() - 4, length)
+ logger.debug("%04x: Expected 0x00000000 after delimiter, found 0x%x",
+ fp.tell() - 4, length)
def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
@@ -38,6 +43,7 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
Parameters
----------
+ fp : file-like object
bytes_to_find : str
Contains the bytes to find. Must be in correct
endian order already.
@@ -56,25 +62,27 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
search_rewind = len(bytes_to_find) - 1
found = False
- EOF = False
+ eof = False
while not found:
chunk_start = fp.tell()
bytes_read = fp.read(read_size)
if len(bytes_read) < read_size:
- # try again - if still don't get required amount, this is last block
+ # try again - if still don't get required amount,
+ # this is the last block
new_bytes = fp.read(read_size - len(bytes_read))
bytes_read += new_bytes
if len(bytes_read) < read_size:
- EOF = True # but will still check whatever we did get
+ eof = True # but will still check whatever we did get
index = bytes_read.find(bytes_to_find)
if index != -1:
found = True
- elif EOF:
+ elif eof:
if rewind:
fp.seek(data_start)
return None
else:
- fp.seek(fp.tell() - search_rewind) # rewind a bit in case delimiter crossed read_size boundary
+ # rewind a bit in case delimiter crossed read_size boundary
+ fp.seek(fp.tell() - search_rewind)
# if get here then have found the byte string
found_at = chunk_start + index
if rewind:
@@ -84,18 +92,25 @@ def find_bytes(fp, bytes_to_find, read_size=128, rewind=True):
return found_at
-def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=None,
+def read_undefined_length_value(fp, is_little_endian, delimiter_tag,
+ defer_size=None,
read_size=128):
- """Read until the delimiter tag found and return the value; ignore the delimiter.
+ """Read until the delimiter tag found and return the value;
+ ignore the delimiter.
- On completion, the file will be set to the first byte after the delimiter and its
- following four zero bytes.
+ On completion, the file will be set to the first byte after the delimiter
+ and its following four zero bytes.
Parameters
----------
fp : a file-like object
is_little_endian : boolean
True if file transfer syntax is little endian, else False.
+ delimiter_tag : BaseTag
+ tag used as and marker for reading
+ defer_size : int, None, optional
+ Size to avoid loading large elements in memory.
+ See ``filereader.read_file`` for more parameter info.
read_size : int
Number of bytes to read at one time.
@@ -119,18 +134,20 @@ def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=
bytes_to_find = pack(bytes_format, delimiter_tag.group, delimiter_tag.elem)
found = False
- EOF = False
+ eof = False
value_chunks = []
+ defer_size = size_in_bytes(defer_size)
byte_count = 0 # for defer_size checks
while not found:
chunk_start = fp.tell()
bytes_read = fp.read(read_size)
if len(bytes_read) < read_size:
- # try again - if still don't get required amount, this is last block
+ # try again - if still don't get required amount,
+ # this is the last block
new_bytes = fp.read(read_size - len(bytes_read))
bytes_read += new_bytes
if len(bytes_read) < read_size:
- EOF = True # but will still check whatever we did get
+ eof = True # but will still check whatever we did get
index = bytes_read.find(bytes_to_find)
if index != -1:
found = True
@@ -141,26 +158,31 @@ def read_undefined_length_value(fp, is_little_endian, delimiter_tag, defer_size=
fp.seek(chunk_start + index + 4) # rewind to end of delimiter
length = fp.read(4)
if length != b"\0\0\0\0":
- msg = "Expected 4 zero bytes after undefined length delimiter at pos {0:04x}"
+ msg = ("Expected 4 zero bytes after undefined length delimiter"
+ " at pos {0:04x}")
logger.error(msg.format(fp.tell() - 4))
- elif EOF:
+ elif eof:
fp.seek(data_start)
- raise EOFError("End of file reached before delimiter {0!r} found".format(delimiter_tag))
+ raise EOFError(
+ "End of file reached before delimiter {0!r} found".format(
+ delimiter_tag))
else:
- fp.seek(fp.tell() - search_rewind) # rewind a bit in case delimiter crossed read_size boundary
+ # rewind a bit in case delimiter crossed read_size boundary
+ fp.seek(fp.tell() - search_rewind)
# accumulate the bytes read (not including the rewind)
new_bytes = bytes_read[:-search_rewind]
byte_count += len(new_bytes)
if defer_size is None or byte_count < defer_size:
value_chunks.append(new_bytes)
# if get here then have found the byte string
- if defer_size is not None and defer_size >= defer_size:
+ if defer_size is not None and byte_count >= defer_size:
return None
else:
return b"".join(value_chunks)
-def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
+def find_delimiter(fp, delimiter, is_little_endian, read_size=128,
+ rewind=True):
"""Return file position where 4-byte delimiter is located.
Parameters
@@ -181,13 +203,15 @@ def find_delimiter(fp, delimiter, is_little_endian, read_size=128, rewind=True):
if not is_little_endian:
struct_format = ">H"
delimiter = Tag(delimiter)
- bytes_to_find = pack(struct_format, delimiter.group) + pack(struct_format, delimiter.elem)
+ bytes_to_find = pack(struct_format, delimiter.group) + pack(struct_format,
+ delimiter.elem)
return find_bytes(fp, bytes_to_find, read_size=read_size, rewind=rewind)
-def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, rewind=True):
- """Search through the file to find the delimiter and return the length of the data
- element.
+def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128,
+ rewind=True):
+ """Search through the file to find the delimiter and return the length
+ of the data element.
Parameters
----------
@@ -206,11 +230,12 @@ def length_of_undefined_length(fp, delimiter, is_little_endian, read_size=128, r
Notes
-----
- Note the data element that the delimiter starts is not read here, the calling
- routine must handle that. Delimiter must be 4 bytes long.
+ Note the data element that the delimiter starts is not read here,
+ the calling routine must handle that. Delimiter must be 4 bytes long.
"""
data_start = fp.tell()
- delimiter_pos = find_delimiter(fp, delimiter, is_little_endian, rewind=rewind)
+ delimiter_pos = find_delimiter(fp, delimiter, is_little_endian,
+ rewind=rewind)
length = delimiter_pos - data_start
return length
@@ -222,7 +247,9 @@ def read_delimiter_item(fp, delimiter):
"""
found = fp.read(4)
if found != delimiter:
- logger.warn("Expected delimitor %s, got %s at file position 0x%x", Tag(delimiter), Tag(found), fp.tell() - 4)
+ logger.warn("Expected delimitor %s, got %s at file position 0x%x",
+ Tag(delimiter), Tag(found), fp.tell() - 4)
length = fp.read_UL()
if length != 0:
- logger.warn("Expected delimiter item to have length 0, got %d at file position 0x%x", length, fp.tell() - 4)
+ logger.warn("Expected delimiter item to have length 0, "
+ "got %d at file position 0x%x", length, fp.tell() - 4)
diff --git a/pydicom/misc.py b/pydicom/misc.py
index 2540a0b47..5e5e5c4f8 100644
--- a/pydicom/misc.py
+++ b/pydicom/misc.py
@@ -12,6 +12,8 @@ _size_factors = dict(KB=1024, MB=1024 * 1024, GB=1024 * 1024 * 1024)
def size_in_bytes(expr):
"""Return the number of bytes for a defer_size argument to read_file()
"""
+ if expr is None:
+ return None
try:
return int(expr)
except ValueError:
@@ -20,13 +22,15 @@ def size_in_bytes(expr):
val = float(expr[:-2]) * _size_factors[unit]
return val
else:
- raise ValueError("Unable to parse length with unit '{0:s}'".format(unit))
+ raise ValueError(
+ "Unable to parse length with unit '{0:s}'".format(unit))
def is_dicom(file):
"""Boolean specifying if file is a proper DICOM file.
- This function is a pared down version of read_preamble meant for a fast return.
+ This function is a pared down version of read_preamble meant for a
+ fast return.
The file is read for a proper preamble ('DICM'), returning True if so,
and False otherwise. This is a conservative approach.
@@ -45,9 +49,6 @@ def is_dicom(file):
raise IOError("File passed was not a valid file")
# TODO: error is only in Py3; what's a better Py2/3 error?
fp = open(file, 'rb')
- preamble = fp.read(0x80)
+ fp.read(0x80) # preamble
magic = fp.read(4)
- if magic == b"DICM":
- return True
- else:
- return False
+ return magic == b"DICM"
diff --git a/pydicom/util/leanread.py b/pydicom/util/leanread.py
index 59df2292f..296e9777c 100644
--- a/pydicom/util/leanread.py
+++ b/pydicom/util/leanread.py
@@ -1,10 +1,11 @@
-# leanread.py
"""Read a dicom media file"""
# Copyright (c) 2013 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
+from pydicom.misc import size_in_bytes
+from struct import Struct, unpack
extra_length_VRs_b = (b'OB', b'OW', b'OF', b'SQ', b'UN', b'UT')
ExplicitVRLittleEndian = b'1.2.840.10008.1.2.1'
@@ -17,9 +18,6 @@ ItemDelimiterTag = 0xFFFEE00D # end of Sequence Item
SequenceDelimiterTag = 0xFFFEE0DD # end of Sequence of undefined length
-from struct import Struct, unpack
-
-
class dicomfile(object):
"""Context-manager based DICOM file object with data element iteration"""
@@ -46,7 +44,8 @@ class dicomfile(object):
# Yield the file meta info elements
file_meta_gen = data_element_generator(self.fobj, is_implicit_VR=False,
is_little_endian=True,
- stop_when=lambda gp, elem: gp != 2)
+ stop_when=lambda gp,
+ elem: gp != 2)
for data_elem in file_meta_gen:
if data_elem[0] == (0x0002, 0x0010):
transfer_syntax_uid = data_elem[3]
@@ -57,12 +56,14 @@ class dicomfile(object):
if transfer_syntax_uid.endswith(b' ') or \
transfer_syntax_uid.endswith(b'\0'):
transfer_syntax_uid = transfer_syntax_uid[:-1]
- is_implicit_VR, is_little_endian = transfer_syntax(transfer_syntax_uid)
+ is_implicit_VR, is_little_endian = transfer_syntax(
+ transfer_syntax_uid)
# print is_implicit_VR
else:
raise NotImplementedError("No transfer syntax in file meta info")
- ds_gen = data_element_generator(self.fobj, is_implicit_VR, is_little_endian)
+ ds_gen = data_element_generator(self.fobj, is_implicit_VR,
+ is_little_endian)
for data_elem in ds_gen:
yield data_elem
@@ -114,6 +115,7 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
fp_read = fp.read
fp_tell = fp.tell
element_struct_unpack = element_struct.unpack
+ defer_size = size_in_bytes(defer_size)
while True:
# Read tag, VR, length, get ready to read value
@@ -166,7 +168,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
try:
VR = dictionary_VR(tag)
except KeyError:
- # Look ahead to see if it consists of items and is thus a SQ
+ # Look ahead to see if it consists of items and
+ # is thus a SQ
next_tag = TupleTag(unpack(endian_chr + "HH", fp_read(4)))
# Rewind the file
fp.seek(fp_tell() - 4)
@@ -180,7 +183,8 @@ def data_element_generator(fp, is_implicit_VR, is_little_endian,
# yield DataElement(tag, VR, seq, value_tell,
# is_undefined_length=True)
else:
- raise NotImplementedError("This reader does not handle undefined length except for SQ")
+ raise NotImplementedError("This reader does not handle "
+ "undefined length except for SQ")
from pydicom.fileio.fileutil import read_undefined_length_value
delimiter = SequenceDelimiterTag
| defer_size of type str is not taken into account
When calling `read_file` with `defer_size='512 KB'`, the conversion of `defer_size` to an integer number of bytes is never performed.
On version 0.9.9 with python3, this leads to:
```
File "/usr/lib/python3.4/site-packages/dicom/filereader.py", line 614, in read_file
force=force)
File "/usr/lib/python3.4/site-packages/dicom/filereader.py", line 553, in read_partial
stop_when=stop_when, defer_size=defer_size)
File "/usr/lib/python3.4/site-packages/dicom/filereader.py", line 305, in read_dataset
raw_data_element = next(de_gen)
File "/usr/lib/python3.4/site-packages/dicom/filereader.py", line 212, in data_element_generator
if defer_size is not None and length > defer_size:
TypeError: unorderable types: int() > str()
```
There is no exception in Python 2 because comparing an `int` to a `str` always gives the same result:
```
>>> 10000000 > '512 KB'
False
```
Nevertheless, a `defer_size` of type `str` is still not taken into account in Python 2.
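The fix adds a `size_in_bytes()` helper to `pydicom.misc` and normalizes the argument once near the top of `data_element_generator()` (the `defer_size = size_in_bytes(defer_size)` line in the patch above), so the length comparison is always int-to-int. The sketch below is only an illustration of such a helper, written to match the expectations of the new `test_size_in_bytes` test in the test patch; the regex and the `_MULTIPLIERS` table are assumptions, not the actual pydicom code.
```
# Illustrative sketch only -- the real pydicom.misc.size_in_bytes may differ.
# Behaviour matched to test_size_in_bytes: None and numbers pass through,
# strings such as '1234', '4 kb', '16 KB', '3 MB' or '2gB' are parsed with
# 1024-based multipliers, and anything else raises ValueError.
import re

_MULTIPLIERS = {'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}  # assumed table


def size_in_bytes(expr):
    """Return `expr` as a number of bytes, passing None and numbers through."""
    if expr is None or isinstance(expr, (int, float)):
        return expr
    match = re.match(r'(\d+)\s*([a-zA-Z]*)$', str(expr).strip())
    if match:
        value, unit = int(match.group(1)), match.group(2).lower()
        if not unit:
            return value  # plain numeric string, e.g. '1234'
        if unit in _MULTIPLIERS:
            return value * _MULTIPLIERS[unit]
    raise ValueError("Unable to parse length with unit '{0}'".format(expr))
```
With such a conversion applied once, `read_file(..., defer_size='512 KB')` behaves identically on Python 2 and Python 3.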
| pydicom/pydicom | diff --git a/tests/test_filereader.py b/tests/test_filereader.py
index 7a07748c5..57d634ddf 100644
--- a/tests/test_filereader.py
+++ b/tests/test_filereader.py
@@ -14,47 +14,24 @@ import shutil
import sys
import tempfile
import unittest
-from pydicom.util.testing.warncheck import assertWarns
-
-try:
- unittest.skipUnless
-except AttributeError:
- try:
- import unittest2 as unittest
- except ImportError:
- print("unittest2 is required for testing in python2.6")
# os.stat is only available on Unix and Windows XXX Mac?
# Not sure if on other platforms the import fails, or the call to it??
-stat_available = True
try:
from os import stat # NOQA
-except:
- stat_available = False
+except ImportError:
+ stat = None
-have_numpy = True
try:
import numpy # NOQA
-except:
- have_numpy = False
-
-from pydicom.dataset import Dataset, FileDataset
-from pydicom.dataelem import DataElement
-from pydicom.filebase import DicomBytesIO
-from pydicom.filereader import read_file, data_element_generator
-from pydicom.errors import InvalidDicomError
-from pydicom.dataset import PropertyError
-from pydicom.tag import Tag, TupleTag
-from pydicom.uid import ImplicitVRLittleEndian
-import pydicom.valuerep
+except ImportError:
+ numpy = None
-have_jpeg_ls = True
try:
import jpeg_ls
except ImportError:
- have_jpeg_ls = False
+ jpeg_ls = None
-have_pillow = True
try:
from PIL import Image as PILImg
except ImportError:
@@ -63,13 +40,26 @@ except ImportError:
import Image as PILImg
except ImportError:
# Neither worked, so it's likely not installed.
- have_pillow = False
+ PILImg = None
+
+from pydicom.dataset import Dataset, FileDataset
+from pydicom.dataelem import DataElement
+from pydicom.filereader import read_file
+from pydicom.errors import InvalidDicomError
+from pydicom.tag import Tag, TupleTag
+from pydicom.uid import ImplicitVRLittleEndian
+from pydicom.util.testing.warncheck import assertWarns
+import pydicom.valuerep
+have_numpy = numpy is not None
+have_jpeg_ls = jpeg_ls is not None
+have_pillow = PILImg is not None
test_dir = os.path.dirname(__file__)
test_files = os.path.join(test_dir, 'test_files')
-empty_number_tags_name = os.path.join(test_files, "reportsi_with_empty_number_tags.dcm")
+empty_number_tags_name = os.path.join(test_files,
+ "reportsi_with_empty_number_tags.dcm")
rtplan_name = os.path.join(test_files, "rtplan.dcm")
rtdose_name = os.path.join(test_files, "rtdose.dcm")
ct_name = os.path.join(test_files, "CT_small.dcm")
@@ -77,14 +67,16 @@ mr_name = os.path.join(test_files, "MR_small.dcm")
truncated_mr_name = os.path.join(test_files, "MR_truncated.dcm")
jpeg2000_name = os.path.join(test_files, "JPEG2000.dcm")
jpeg2000_lossless_name = os.path.join(test_files, "MR_small_jp2klossless.dcm")
-jpeg_ls_lossless_name = os.path.join(test_files, "MR_small_jpeg_ls_lossless.dcm")
+jpeg_ls_lossless_name = os.path.join(test_files,
+ "MR_small_jpeg_ls_lossless.dcm")
jpeg_lossy_name = os.path.join(test_files, "JPEG-lossy.dcm")
jpeg_lossless_name = os.path.join(test_files, "JPEG-LL.dcm")
deflate_name = os.path.join(test_files, "image_dfl.dcm")
rtstruct_name = os.path.join(test_files, "rtstruct.dcm")
priv_SQ_name = os.path.join(test_files, "priv_SQ.dcm")
nested_priv_SQ_name = os.path.join(test_files, "nested_priv_SQ.dcm")
-meta_missing_tsyntax_name = os.path.join(test_files, "meta_missing_tsyntax.dcm")
+meta_missing_tsyntax_name = os.path.join(test_files,
+ "meta_missing_tsyntax.dcm")
no_meta_group_length = os.path.join(test_files, "no_meta_group_length.dcm")
gzip_name = os.path.join(test_files, "zipMR.gz")
color_px_name = os.path.join(test_files, "color-px.dcm")
@@ -93,8 +85,10 @@ explicit_vr_le_no_meta = os.path.join(test_files, "ExplVR_LitEndNoMeta.dcm")
explicit_vr_be_no_meta = os.path.join(test_files, "ExplVR_BigEndNoMeta.dcm")
emri_name = os.path.join(test_files, "emri_small.dcm")
emri_big_endian_name = os.path.join(test_files, "emri_small_big_endian.dcm")
-emri_jpeg_ls_lossless = os.path.join(test_files, "emri_small_jpeg_ls_lossless.dcm")
-emri_jpeg_2k_lossless = os.path.join(test_files, "emri_small_jpeg_2k_lossless.dcm")
+emri_jpeg_ls_lossless = os.path.join(test_files,
+ "emri_small_jpeg_ls_lossless.dcm")
+emri_jpeg_2k_lossless = os.path.join(test_files,
+ "emri_small_jpeg_2k_lossless.dcm")
color_3d_jpeg_baseline = os.path.join(test_files, "color3d_jpeg_baseline.dcm")
dir_name = os.path.dirname(sys.argv[0])
save_dir = os.getcwd()
@@ -104,7 +98,7 @@ def isClose(a, b, epsilon=0.000001):
"""Compare within some tolerance, to avoid machine roundoff differences"""
try:
a.append # see if is a list
- except: # (is not)
+ except BaseException: # (is not)
return abs(a - b) < epsilon
else:
if len(a) != len(b):
@@ -117,7 +111,8 @@ def isClose(a, b, epsilon=0.000001):
class ReaderTests(unittest.TestCase):
def testEmptyNumbersTag(self):
- """Tests that an empty tag with a number VR (FL, UL, SL, US, SS, FL, FD, OF) reads as an empty string"""
+ """Tests that an empty tag with a number VR (FL, UL, SL, US,
+ SS, FL, FD, OF) reads as an empty string"""
empty_number_tags_ds = read_file(empty_number_tags_name)
self.assertEqual(empty_number_tags_ds.ExaminedBodyThickness, '')
self.assertEqual(empty_number_tags_ds.SimpleFrameList, '')
@@ -135,56 +130,72 @@ class ReaderTests(unittest.TestCase):
self.assertTrue(ds is not None)
def testRTPlan(self):
- """Returns correct values for sample data elements in test RT Plan file"""
+ """Returns correct values for sample data elements in test
+ RT Plan file.
+ """
plan = read_file(rtplan_name)
beam = plan.BeamSequence[0]
- cp0, cp1 = beam.ControlPointSequence # if not two controlpoints, then this would raise exception
+ # if not two controlpoints, then this would raise exception
+ cp0, cp1 = beam.ControlPointSequence
- self.assertEqual(beam.TreatmentMachineName, "unit001", "Incorrect unit name")
+ self.assertEqual(beam.TreatmentMachineName, "unit001",
+ "Incorrect unit name")
self.assertEqual(beam.TreatmentMachineName, beam[0x300a, 0x00b2].value,
- "beam TreatmentMachineName does not match the value accessed by tag number")
+ "beam TreatmentMachineName does not match "
+ "the value accessed by tag number")
- got = cp1.ReferencedDoseReferenceSequence[0].CumulativeDoseReferenceCoefficient
+ got = cp1.ReferencedDoseReferenceSequence[
+ 0].CumulativeDoseReferenceCoefficient
DS = pydicom.valuerep.DS
expected = DS('0.9990268')
self.assertTrue(got == expected,
- "Cum Dose Ref Coeff not the expected value (CP1, Ref'd Dose Ref")
+ "Cum Dose Ref Coeff not the expected value "
+ "(CP1, Ref'd Dose Ref")
got = cp0.BeamLimitingDevicePositionSequence[0].LeafJawPositions
self.assertTrue(got[0] == DS('-100') and got[1] == DS('100.0'),
"X jaws not as expected (control point 0)")
def testRTDose(self):
- """Returns correct values for sample data elements in test RT Dose file"""
+ """Returns correct values for sample data elements in test
+ RT Dose file"""
dose = read_file(rtdose_name)
self.assertEqual(dose.FrameIncrementPointer, Tag((0x3004, 0x000c)),
"Frame Increment Pointer not the expected value")
self.assertEqual(dose.FrameIncrementPointer, dose[0x28, 9].value,
- "FrameIncrementPointer does not match the value accessed by tag number")
+ "FrameIncrementPointer does not match the value "
+ "accessed by tag number")
- # try a value that is nested the deepest (so deep I break it into two steps!)
- fract = dose.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0]
+ # try a value that is nested the deepest
+ # (so deep I break it into two steps!)
+ fract = \
+ dose.ReferencedRTPlanSequence[0].ReferencedFractionGroupSequence[0]
beamnum = fract.ReferencedBeamSequence[0].ReferencedBeamNumber
self.assertEqual(beamnum, 1, "Beam number not the expected value")
def testCT(self):
- """Returns correct values for sample data elements in test CT file...."""
+ """Returns correct values for sample data elements in test CT file."""
ct = read_file(ct_name)
- self.assertEqual(ct.file_meta.ImplementationClassUID, '1.3.6.1.4.1.5962.2',
+ self.assertEqual(ct.file_meta.ImplementationClassUID,
+ '1.3.6.1.4.1.5962.2',
"ImplementationClassUID not the expected value")
self.assertEqual(ct.file_meta.ImplementationClassUID,
ct.file_meta[0x2, 0x12].value,
- "ImplementationClassUID does not match the value accessed by tag number")
- # (0020, 0032) Image Position (Patient) [-158.13580300000001, -179.035797, -75.699996999999996]
+ "ImplementationClassUID does not match the value "
+ "accessed by tag number")
+ # (0020, 0032) Image Position (Patient)
+ # [-158.13580300000001, -179.035797, -75.699996999999996]
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected."
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected."
"got {0}, expected {1}".format(got, expected))
self.assertEqual(ct.Rows, 128, "Rows not 128")
self.assertEqual(ct.Columns, 128, "Columns not 128")
self.assertEqual(ct.BitsStored, 16, "Bits Stored not 16")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
# Also test private elements name can be resolved:
expected = "[Duration of X-ray on]"
@@ -194,20 +205,24 @@ class ReaderTests(unittest.TestCase):
@unittest.skipUnless(have_numpy, "Numpy not installed")
def testCTPixelData(self):
- """Check that we can read pixel data. Tests that we get last one in array."""
+ """Check that we can read pixel data.
+ Tests that we get last one in array.
+ """
ct = read_file(ct_name)
expected = 909
got = ct.pixel_array[-1][-1]
- msg = "Did not get correct value for last pixel: expected %d, got %r" % (expected, got)
+ msg = ("Did not get correct value for last pixel: "
+ "expected %d, got %r" % (expected, got))
self.assertEqual(expected, got, msg)
def testNoForce(self):
- """Raises exception if missing DICOM header and force==False..........."""
+ """Raises exception if missing DICOM header and force==False."""
self.assertRaises(InvalidDicomError, read_file, rtstruct_name)
def testRTstruct(self):
- """Returns correct values for sample elements in test RTSTRUCT file...."""
- # RTSTRUCT test file has complex nested sequences -- see rtstruct.dump file
+ """Returns correct values for sample elements in test RTSTRUCT file."""
+ # RTSTRUCT test file has complex nested sequences
+ # -- see rtstruct.dump file
# Also has no DICOM header ... so tests 'force' argument of read_file
rtss = read_file(rtstruct_name, force=True)
@@ -230,7 +245,8 @@ class ReaderTests(unittest.TestCase):
self.assertEqual(expected, got, msg)
def testDir(self):
- """Returns correct dir attributes for both Dataset and DICOM names (python >= 2.6).."""
+ """Returns correct dir attributes for both Dataset and DICOM names
+ (python >= 2.6).."""
# Only python >= 2.6 calls __dir__ for dir() call
rtss = read_file(rtstruct_name, force=True)
# sample some expected 'dir' values
@@ -238,7 +254,8 @@ class ReaderTests(unittest.TestCase):
expect_in_dir = ['pixel_array', 'add_new', 'ROIContourSequence',
'StructureSetDate']
for name in expect_in_dir:
- self.assertTrue(name in got_dir, "Expected name '%s' in dir()" % name)
+ self.assertTrue(name in got_dir,
+ "Expected name '%s' in dir()" % name)
# Now check for some items in dir() of a nested item
roi0 = rtss.ROIContourSequence[0]
@@ -246,52 +263,65 @@ class ReaderTests(unittest.TestCase):
expect_in_dir = ['pixel_array', 'add_new', 'ReferencedROINumber',
'ROIDisplayColor']
for name in expect_in_dir:
- self.assertTrue(name in got_dir, "Expected name '%s' in dir()" % name)
+ self.assertTrue(name in got_dir,
+ "Expected name '%s' in dir()" % name)
def testMR(self):
- """Returns correct values for sample data elements in test MR file....."""
+ """Returns correct values for sample data elements in test MR file."""
mr = read_file(mr_name)
# (0010, 0010) Patient's Name 'CompressedSamples^MR1'
mr.decode()
- self.assertEqual(mr.PatientName, 'CompressedSamples^MR1', "Wrong patient name")
+ self.assertEqual(mr.PatientName, 'CompressedSamples^MR1',
+ "Wrong patient name")
self.assertEqual(mr.PatientName, mr[0x10, 0x10].value,
- "Name does not match value found when accessed by tag number")
+ "Name does not match value found when "
+ "accessed by tag number")
got = mr.PixelSpacing
DS = pydicom.valuerep.DS
expected = [DS('0.3125'), DS('0.3125')]
self.assertTrue(got == expected, "Wrong pixel spacing")
def testDeflate(self):
- """Returns correct values for sample data elements in test compressed (zlib deflate) file"""
- # Everything after group 2 is compressed. If we can read anything else, the decompression must have been ok.
+ """Returns correct values for sample data elements in test compressed
+ (zlib deflate) file
+ """
+ # Everything after group 2 is compressed.
+ # If we can read anything else, the decompression must have been ok.
ds = read_file(deflate_name)
got = ds.ConversionType
expected = "WSD"
- self.assertEqual(got, expected, "Attempted to read deflated file data element Conversion Type, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Attempted to read deflated file data element "
+ "Conversion Type, expected '%s', got '%s'" % (
+ expected, got))
def testNoPixelsRead(self):
- """Returns all data elements before pixels using stop_before_pixels=False"""
+ """Returns all data elements before pixels using
+ stop_before_pixels=False.
+ """
# Just check the tags, and a couple of values
ctpartial = read_file(ct_name, stop_before_pixels=True)
ctpartial_tags = sorted(ctpartial.keys())
ctfull = read_file(ct_name)
ctfull_tags = sorted(ctfull.keys())
- msg = "Tag list of partial CT read (except pixel tag and padding) did not match full read"
+ msg = ("Tag list of partial CT read (except pixel tag and padding) "
+ "did not match full read")
msg += "\nExpected: %r\nGot %r" % (ctfull_tags[:-2], ctpartial_tags)
missing = [Tag(0x7fe0, 0x10), Tag(0xfffc, 0xfffc)]
self.assertEqual(ctfull_tags, ctpartial_tags + missing, msg)
def testPrivateSQ(self):
- """Can read private undefined length SQ without error...................."""
- # From issues 91, 97, 98. Bug introduced by fast reading, due to VR=None
- # in raw data elements, then an undefined length private item VR is looked up,
- # and there is no such tag, generating an exception
+ """Can read private undefined length SQ without error."""
+ # From issues 91, 97, 98. Bug introduced by fast reading, due to
+ # VR=None in raw data elements, then an undefined length private
+ # item VR is looked up, and there is no such tag,
+ # generating an exception
# Simply read the file, in 0.9.5 this generated an exception
read_file(priv_SQ_name)
def testNestedPrivateSQ(self):
- """Can successfully read a private SQ which contains additional SQ's....."""
+ """Can successfully read a private SQ which contains additional SQs."""
# From issue 113. When a private SQ of undefined length is used, the
# sequence is read in and the length of the SQ is determined upon
# identification of the SQ termination sequence. When using nested
@@ -303,7 +333,8 @@ class ReaderTests(unittest.TestCase):
# Make sure that the entire dataset was read in
pixel_data_tag = TupleTag((0x7fe0, 0x10))
self.assertTrue(pixel_data_tag in ds,
- "Entire dataset was not parsed properly. PixelData is not present")
+ "Entire dataset was not parsed properly. "
+ "PixelData is not present")
# Check that the DataElement is indeed a Sequence
tag = TupleTag((0x01, 0x01))
@@ -328,44 +359,54 @@ class ReaderTests(unittest.TestCase):
"Expected a value of %s, got %s'" % (expected, got))
def testNoMetaGroupLength(self):
- """Read file with no group length in file meta..........................."""
+ """Read file with no group length in file meta."""
# Issue 108 -- iView example file with no group length (0002,0002)
# Originally crashed, now check no exception, but also check one item
# in file_meta, and second one in followinsg dataset
ds = read_file(no_meta_group_length)
got = ds.InstanceCreationDate
expected = "20111130"
- self.assertEqual(got, expected, "Sample data element after file meta with no group length failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element after file meta with no "
+ "group length failed, expected '%s', got '%s'" % (
+ expected, got))
def testNoTransferSyntaxInMeta(self):
- """Read file with file_meta, but has no TransferSyntaxUID in it............"""
+ """Read file with file_meta, but has no TransferSyntaxUID in it."""
# From issue 258: if file has file_meta but no TransferSyntaxUID in it,
# should assume default transfer syntax
- ds = read_file(meta_missing_tsyntax_name) # is dicom default transfer syntax
+ ds = read_file(
+ meta_missing_tsyntax_name) # is dicom default transfer syntax
# Repeat one test from nested private sequence test to maker sure
# file was read correctly
pixel_data_tag = TupleTag((0x7fe0, 0x10))
self.assertTrue(pixel_data_tag in ds,
- "Failed to properly read a file with no Transfer Syntax in file_meta")
+ "Failed to properly read a file with no "
+ "Transfer Syntax in file_meta")
def testExplicitVRLittleEndianNoMeta(self):
- """Read file without file meta with Little Endian Explicit VR dataset...."""
+ """Read file without file meta with Little Endian Explicit VR dataset.
+ """
# Example file from CMS XiO 5.0 and above
# Still need to force read data since there is no 'DICM' marker present
ds = read_file(explicit_vr_le_no_meta, force=True)
got = ds.InstanceCreationDate
expected = "20150529"
- self.assertEqual(got, expected, "Sample data element from dataset failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element from dataset failed, "
+ "expected '%s', got '%s'" % (expected, got))
def testExplicitVRBigEndianNoMeta(self):
- """Read file without file meta with Big Endian Explicit VR dataset......."""
+ """Read file without file meta with Big Endian Explicit VR dataset."""
# Example file from CMS XiO 5.0 and above
# Still need to force read data since there is no 'DICM' marker present
ds = read_file(explicit_vr_be_no_meta, force=True)
got = ds.InstanceCreationDate
expected = "20150529"
- self.assertEqual(got, expected, "Sample data element from dataset failed, expected '%s', got '%s'" % (expected, got))
+ self.assertEqual(got, expected,
+ "Sample data element from dataset failed, "
+ "expected '%s', got '%s'" % (expected, got))
def testPlanarConfig(self):
px_data_ds = read_file(color_px_name)
@@ -407,7 +448,8 @@ class ReaderTests(unittest.TestCase):
self.assertEqual(ds[0x7fe00010].VR, 'OB')
def test_long_specific_char_set(self):
- """Test that specific character set is read even if it is longer than defer_size"""
+ """Test that specific character set is read even if it is longer
+ than defer_size"""
ds = Dataset()
long_specific_char_set_value = ['ISO 2022IR 100'] * 9
@@ -422,52 +464,62 @@ class ReaderTests(unittest.TestCase):
def test_no_preamble_file_meta_dataset(self):
"""Test correct read of group 2 elements with no preamble."""
- bytestream = b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ bytestream = (b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00\x20\x20\x10\x00'
+ b'\x02\x00\x00\x00\x01\x00\x20\x20\x20\x00\x06\x00'
+ b'\x00\x00\x4e\x4f\x52\x4d\x41\x4c')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
def test_no_preamble_command_group_dataset(self):
"""Test correct read of group 0 and 2 elements with no preamble."""
- bytestream = b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00' \
- b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c' \
- b'\x00\x00\x10\x01\x02\x00\x00\x00\x03\x00'
+ bytestream = (b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00'
+ b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20'
+ b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ b'\x00\x00\x10\x01\x02\x00\x00\x00\x03\x00')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
self.assertEqual(ds.MessageID, 3)
def test_group_length_wrong(self):
- """Test file is read correctly even if FileMetaInformationGroupLength is incorrect."""
- bytestream = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00' \
- b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20' \
- b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c'
+ """Test file is read correctly even if FileMetaInformationGroupLength
+ is incorrect.
+ """
+ bytestream = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00'
+ b'\x20\x20\x10\x00\x02\x00\x00\x00\x01\x00\x20\x20'
+ b'\x20\x00\x06\x00\x00\x00\x4e\x4f\x52\x4d\x41\x4c')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
- self.assertFalse(len(bytestream) - 12 == ds.file_meta.FileMetaInformationGroupLength)
+ self.assertFalse(len(
+ bytestream) - 12 == ds.file_meta.FileMetaInformationGroupLength)
self.assertTrue(ds.file_meta.FileMetaInformationGroupLength == 10)
self.assertTrue('MediaStorageSOPClassUID' in ds.file_meta)
- self.assertEqual(ds.file_meta.TransferSyntaxUID, ImplicitVRLittleEndian)
+ self.assertEqual(ds.file_meta.TransferSyntaxUID,
+ ImplicitVRLittleEndian)
self.assertEqual(ds.Polarity, 'NORMAL')
self.assertEqual(ds.ImageBoxPosition, 1)
@@ -475,18 +527,19 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble, command and meta elements"""
preamble = b'\x00' * 128
prefix = b'DICM'
- command = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
- meta = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ command = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
+ meta = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
bytestream = preamble + prefix + meta + command
fp = BytesIO(bytestream)
@@ -498,11 +551,12 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble and meta elements"""
preamble = b'\x00' * 128
prefix = b'DICM'
- meta = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ meta = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
bytestream = preamble + prefix + meta
fp = BytesIO(bytestream)
@@ -515,13 +569,13 @@ class ReaderTests(unittest.TestCase):
"""Test reading only preamble and command set"""
preamble = b'\x00' * 128
prefix = b'DICM'
- command = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
+ command = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
bytestream = preamble + prefix + command
fp = BytesIO(bytestream)
@@ -531,11 +585,12 @@ class ReaderTests(unittest.TestCase):
def test_meta_no_dataset(self):
"""Test reading only meta elements"""
- bytestream = b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00' \
- b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x35\x2e\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00' \
- b'\x55\x49\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30\x30\x30\x38' \
- b'\x2e\x31\x2e\x32\x00'
+ bytestream = (b'\x02\x00\x00\x00\x55\x4C\x04\x00\x0A\x00\x00\x00'
+ b'\x02\x00\x02\x00\x55\x49\x16\x00\x31\x2e\x32\x2e'
+ b'\x38\x34\x30\x2e\x31\x30\x30\x30\x38\x2e\x35\x2e'
+ b'\x31\x2e\x31\x2e\x39\x00\x02\x00\x10\x00\x55\x49'
+ b'\x12\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31\x30'
+ b'\x30\x30\x38\x2e\x31\x2e\x32\x00')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('TransferSyntaxUID' in ds.file_meta)
@@ -543,13 +598,13 @@ class ReaderTests(unittest.TestCase):
def test_commandset_no_dataset(self):
"""Test reading only command set elements"""
- bytestream = b'\x00\x00\x00\x00\x04\x00\x00\x00\x38' \
- b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00' \
- b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31' \
- b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00' \
- b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00' \
- b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00' \
- b'\x00\x00\x08\x02\x00\x00\x00\x01\x01'
+ bytestream = (b'\x00\x00\x00\x00\x04\x00\x00\x00\x38'
+ b'\x00\x00\x00\x00\x00\x02\x00\x12\x00\x00'
+ b'\x00\x31\x2e\x32\x2e\x38\x34\x30\x2e\x31'
+ b'\x30\x30\x30\x38\x2e\x31\x2e\x31\x00\x00'
+ b'\x00\x00\x01\x02\x00\x00\x00\x30\x00\x00'
+ b'\x00\x10\x01\x02\x00\x00\x00\x07\x00\x00'
+ b'\x00\x00\x08\x02\x00\x00\x00\x01\x01')
fp = BytesIO(bytestream)
ds = read_file(fp, force=True)
self.assertTrue('MessageID' in ds)
@@ -569,23 +624,24 @@ class ReaderTests(unittest.TestCase):
class ReadDataElementTests(unittest.TestCase):
def setUp(self):
ds = Dataset()
- ds.DoubleFloatPixelData = b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07' # VR of OD
- ds.SelectorOLValue = b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03' # VR of OL
- ds.PotentialReasonsForProcedure = ['A', 'B', 'C'] # VR of UC, odd length
- ds.StrainDescription = 'Test' # Even length
- ds.URNCodeValue = 'http://test.com' # VR of UR
- ds.RetrieveURL = 'ftp://test.com ' # Test trailing spaces ignored
- ds.DestinationAE = ' TEST 12 ' # 16 characters max for AE
-
- self.fp = BytesIO() # Implicit little
+ ds.DoubleFloatPixelData = (b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07') # OD
+ ds.SelectorOLValue = (b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03') # VR of OL
+ ds.PotentialReasonsForProcedure = ['A', 'B',
+ 'C'] # VR of UC, odd length
+ ds.StrainDescription = 'Test' # Even length
+ ds.URNCodeValue = 'http://test.com' # VR of UR
+ ds.RetrieveURL = 'ftp://test.com ' # Test trailing spaces ignored
+ ds.DestinationAE = ' TEST 12 ' # 16 characters max for AE
+
+ self.fp = BytesIO() # Implicit little
file_ds = FileDataset(self.fp, ds)
file_ds.is_implicit_VR = True
file_ds.is_little_endian = True
file_ds.save_as(self.fp)
- self.fp_ex = BytesIO() # Explicit little
+ self.fp_ex = BytesIO() # Explicit little
file_ds = FileDataset(self.fp_ex, ds)
file_ds.is_implicit_VR = False
file_ds.is_little_endian = True
@@ -595,32 +651,36 @@ class ReadDataElementTests(unittest.TestCase):
"""Check creation of OD DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
ref_elem = ds.get(0x7fe00009)
- elem = DataElement(0x7fe00009, 'OD', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07')
+ elem = DataElement(0x7fe00009, 'OD',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07')
self.assertEqual(ref_elem, elem)
def test_read_OD_explicit_little(self):
"""Check creation of OD DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
ref_elem = ds.get(0x7fe00009)
- elem = DataElement(0x7fe00009, 'OD', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03\x04\x05\x06\x07')
+ elem = DataElement(0x7fe00009, 'OD',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03\x04\x05\x06\x07')
self.assertEqual(ref_elem, elem)
def test_read_OL_implicit_little(self):
"""Check creation of OL DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
ref_elem = ds.get(0x00720075)
- elem = DataElement(0x00720075, 'OL', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03')
+ elem = DataElement(0x00720075, 'OL',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03')
self.assertEqual(ref_elem, elem)
def test_read_OL_explicit_little(self):
"""Check creation of OL DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
ref_elem = ds.get(0x00720075)
- elem = DataElement(0x00720075, 'OL', b'\x00\x01\x02\x03\x04\x05\x06\x07' \
- b'\x01\x01\x02\x03')
+ elem = DataElement(0x00720075, 'OL',
+ b'\x00\x01\x02\x03\x04\x05\x06\x07'
+ b'\x01\x01\x02\x03')
self.assertEqual(ref_elem, elem)
def test_read_UC_implicit_little(self):
@@ -650,24 +710,24 @@ class ReadDataElementTests(unittest.TestCase):
def test_read_UR_implicit_little(self):
"""Check creation of DataElement from byte data works correctly."""
ds = read_file(self.fp, force=True)
- ref_elem = ds.get(0x00080120) # URNCodeValue
+ ref_elem = ds.get(0x00080120) # URNCodeValue
elem = DataElement(0x00080120, 'UR', 'http://test.com')
self.assertEqual(ref_elem, elem)
# Test trailing spaces ignored
- ref_elem = ds.get(0x00081190) # RetrieveURL
+ ref_elem = ds.get(0x00081190) # RetrieveURL
elem = DataElement(0x00081190, 'UR', 'ftp://test.com')
self.assertEqual(ref_elem, elem)
def test_read_UR_explicit_little(self):
"""Check creation of DataElement from byte data works correctly."""
ds = read_file(self.fp_ex, force=True)
- ref_elem = ds.get(0x00080120) # URNCodeValue
+ ref_elem = ds.get(0x00080120) # URNCodeValue
elem = DataElement(0x00080120, 'UR', 'http://test.com')
self.assertEqual(ref_elem, elem)
# Test trailing spaces ignored
- ref_elem = ds.get(0x00081190) # RetrieveURL
+ ref_elem = ds.get(0x00081190) # RetrieveURL
elem = DataElement(0x00081190, 'UR', 'ftp://test.com')
self.assertEqual(ref_elem, elem)
@@ -690,9 +750,11 @@ class JPEG_LS_Tests(unittest.TestCase):
a = self.jpeg_ls_lossless.pixel_array
b = self.mr_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.jpeg_ls_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.jpeg_ls_lossless._get_pixel_array)
def test_emri_JPEG_LS_PixelArray(self):
"""JPEG LS Lossless: Now works"""
@@ -700,9 +762,11 @@ class JPEG_LS_Tests(unittest.TestCase):
a = self.emri_jpeg_ls_lossless.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.emri_jpeg_ls_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.emri_jpeg_ls_lossless._get_pixel_array)
class BigEndian_Tests(unittest.TestCase):
@@ -716,9 +780,11 @@ class BigEndian_Tests(unittest.TestCase):
a = self.emri_big_endian.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded big endian pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded big endian pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(ImportError, self.emri_big_endian._get_pixel_array)
+ self.assertRaises(ImportError,
+ self.emri_big_endian._get_pixel_array)
class JPEG2000Tests(unittest.TestCase):
@@ -730,14 +796,19 @@ class JPEG2000Tests(unittest.TestCase):
self.emri_small = read_file(emri_name)
def testJPEG2000(self):
- """JPEG2000: Returns correct values for sample data elements............"""
- expected = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)] # XX also tests multiple-valued AT data element
+ """JPEG2000: Returns correct values for sample data elements."""
+ # XX also tests multiple-valued AT data element
+ expected = [Tag(0x0054, 0x0010), Tag(0x0054, 0x0020)]
got = self.jpeg.FrameIncrementPointer
- self.assertEqual(got, expected, "JPEG2000 file, Frame Increment Pointer: expected %s, got %s" % (expected, got))
+ self.assertEqual(got, expected,
+ "JPEG2000 file, Frame Increment Pointer: "
+ "expected %s, got %s" % (expected, got))
got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
expected = 'Lossy Compression'
- self.assertEqual(got, expected, "JPEG200 file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG200 file, Code Meaning got %s, expected %s" % (
+ got, expected))
def testJPEG2000PixelArray(self):
"""JPEG2000: Now works"""
@@ -745,9 +816,11 @@ class JPEG2000Tests(unittest.TestCase):
a = self.jpegls.pixel_array
b = self.mr_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.jpegls._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.jpegls._get_pixel_array)
def test_emri_JPEG2000PixelArray(self):
"""JPEG2000: Now works"""
@@ -755,25 +828,28 @@ class JPEG2000Tests(unittest.TestCase):
a = self.emri_jpeg_2k_lossless.pixel_array
b = self.emri_small.pixel_array
self.assertEqual(a.mean(), b.mean(),
- "Decoded pixel data is not all {0} (mean == {1})".format(b.mean(), a.mean()))
+ "Decoded pixel data is not all {0} "
+ "(mean == {1})".format(b.mean(), a.mean()))
else:
- self.assertRaises(NotImplementedError, self.emri_jpeg_2k_lossless._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.emri_jpeg_2k_lossless._get_pixel_array)
class JPEGlossyTests(unittest.TestCase):
-
def setUp(self):
self.jpeg = read_file(jpeg_lossy_name)
self.color_3d_jpeg = read_file(color_3d_jpeg_baseline)
def testJPEGlossy(self):
- """JPEG-lossy: Returns correct values for sample data elements.........."""
+ """JPEG-lossy: Returns correct values for sample data elements."""
got = self.jpeg.DerivationCodeSequence[0].CodeMeaning
expected = 'Lossy Compression'
- self.assertEqual(got, expected, "JPEG-lossy file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG-lossy file, Code Meaning got %s, "
+ "expected %s" % (got, expected))
def testJPEGlossyPixelArray(self):
- """JPEG-lossy: Fails gracefully when uncompressed data is asked for....."""
+ """JPEG-lossy: Fails gracefully when uncompressed data is asked for."""
if have_pillow and have_numpy:
self.assertRaises(NotImplementedError, self.jpeg._get_pixel_array)
else:
@@ -787,7 +863,8 @@ class JPEGlossyTests(unittest.TestCase):
self.assertEqual(tuple(a[3, 159, 290, :]), (41, 41, 41))
self.assertEqual(tuple(a[3, 169, 290, :]), (57, 57, 57))
else:
- self.assertRaises(NotImplementedError, self.color_3d_jpeg._get_pixel_array)
+ self.assertRaises(NotImplementedError,
+ self.color_3d_jpeg._get_pixel_array)
class JPEGlosslessTests(unittest.TestCase):
@@ -795,13 +872,17 @@ class JPEGlosslessTests(unittest.TestCase):
self.jpeg = read_file(jpeg_lossless_name)
def testJPEGlossless(self):
- """JPEGlossless: Returns correct values for sample data elements........"""
- got = self.jpeg.SourceImageSequence[0].PurposeOfReferenceCodeSequence[0].CodeMeaning
+ """JPEGlossless: Returns correct values for sample data elements."""
+ got = self.jpeg.SourceImageSequence[0].PurposeOfReferenceCodeSequence[
+ 0].CodeMeaning
expected = 'Uncompressed predecessor'
- self.assertEqual(got, expected, "JPEG-lossless file, Code Meaning got %s, expected %s" % (got, expected))
+ self.assertEqual(got, expected,
+ "JPEG-lossless file, Code Meaning got %s, "
+ "expected %s" % (got, expected))
def testJPEGlosslessPixelArray(self):
- """JPEGlossless: Fails gracefully when uncompressed data is asked for..."""
+ """JPEGlossless: Fails gracefully when uncompressed data is asked for.
+ """
# This test passes if the call raises either an
# ImportError when there is no Pillow module
# Or
@@ -823,6 +904,7 @@ class DeferredReadTests(unittest.TestCase):
"""Test that deferred data element reading (for large size)
works as expected
"""
+
# Copy one of test files and use temporarily, then later remove.
def setUp(self):
self.testfile_name = ct_name + ".tmp"
@@ -830,8 +912,8 @@ class DeferredReadTests(unittest.TestCase):
def testTimeCheck(self):
"""Deferred read warns if file has been modified..........."""
- if stat_available:
- ds = read_file(self.testfile_name, defer_size=2000)
+ if stat is not None:
+ ds = read_file(self.testfile_name, defer_size='2 kB')
from time import sleep
sleep(1)
with open(self.testfile_name, "r+") as f:
@@ -859,7 +941,8 @@ class DeferredReadTests(unittest.TestCase):
ds_defer = read_file(self.testfile_name, defer_size=2000)
for data_elem in ds_norm:
tag = data_elem.tag
- self.assertEqual(data_elem.value, ds_defer[tag].value, "Mismatched value for tag %r" % tag)
+ self.assertEqual(data_elem.value, ds_defer[tag].value,
+ "Mismatched value for tag %r" % tag)
def testZippedDeferred(self):
"""Deferred values from a gzipped file works.............."""
@@ -867,8 +950,8 @@ class DeferredReadTests(unittest.TestCase):
fobj = gzip.open(gzip_name)
ds = read_file(fobj, defer_size=1)
fobj.close()
- # before the fix, this threw an error as file reading was not in right place,
- # it was re-opened as a normal file, not zip file
+ # before the fix, this threw an error as file reading was not in
+ # the right place, it was re-opened as a normal file, not a zip file
ds.InstanceNumber
def tearDown(self):
@@ -880,9 +963,11 @@ class ReadTruncatedFileTests(unittest.TestCase):
def testReadFileWithMissingPixelData(self):
mr = read_file(truncated_mr_name)
mr.decode()
- self.assertEqual(mr.PatientName, 'CompressedSamples^MR1', "Wrong patient name")
+ self.assertEqual(mr.PatientName, 'CompressedSamples^MR1',
+ "Wrong patient name")
self.assertEqual(mr.PatientName, mr[0x10, 0x10].value,
- "Name does not match value found when accessed by tag number")
+ "Name does not match value found when "
+ "accessed by tag number")
got = mr.PixelSpacing
DS = pydicom.valuerep.DS
expected = [DS('0.3125'), DS('0.3125')]
@@ -892,35 +977,48 @@ class ReadTruncatedFileTests(unittest.TestCase):
def testReadFileWithMissingPixelDataArray(self):
mr = read_file(truncated_mr_name)
mr.decode()
- with self.assertRaisesRegexp(AttributeError, "Amount of pixel data.*does not match the expected data"):
+ with self.assertRaisesRegexp(AttributeError,
+ "Amount of pixel data.*does not match "
+ "the expected data"):
mr.pixel_array
class FileLikeTests(unittest.TestCase):
- """Test that can read DICOM files with file-like object rather than filename"""
+ """Test that can read DICOM files with file-like object rather than
+ filename
+ """
+
def testReadFileGivenFileObject(self):
"""filereader: can read using already opened file............"""
f = open(ct_name, 'rb')
ct = read_file(f)
- # Tests here simply repeat testCT -- perhaps should collapse the code together?
+ # Tests here simply repeat testCT -- perhaps should collapse
+ # the code together?
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
- self.assertEqual(ct.file_meta.ImplementationClassUID, '1.3.6.1.4.1.5962.2',
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
+ self.assertEqual(ct.file_meta.ImplementationClassUID,
+ '1.3.6.1.4.1.5962.2',
"ImplementationClassUID not the expected value")
self.assertEqual(ct.file_meta.ImplementationClassUID,
ct.file_meta[0x2, 0x12].value,
- "ImplementationClassUID does not match the value accessed by tag number")
- # (0020, 0032) Image Position (Patient) [-158.13580300000001, -179.035797, -75.699996999999996]
+ "ImplementationClassUID does not match the "
+ "value accessed by tag number")
+ # (0020, 0032) Image Position (Patient)
+ # [-158.13580300000001, -179.035797, -75.699996999999996]
got = ct.ImagePositionPatient
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
self.assertEqual(ct.Rows, 128, "Rows not 128")
self.assertEqual(ct.Columns, 128, "Columns not 128")
self.assertEqual(ct.BitsStored, 16, "Bits Stored not 16")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
- # Should also be able to close the file ourselves without exception raised:
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
+ # Should also be able to close the file ourselves without
+ # exception raised:
f.close()
def testReadFileGivenFileLikeObject(self):
@@ -932,13 +1030,17 @@ class FileLikeTests(unittest.TestCase):
got = ct.ImagePositionPatient
DS = pydicom.valuerep.DS
expected = [DS('-158.135803'), DS('-179.035797'), DS('-75.699997')]
- self.assertTrue(got == expected, "ImagePosition(Patient) values not as expected")
- self.assertEqual(len(ct.PixelData), 128 * 128 * 2, "Pixel data not expected length")
- # Should also be able to close the file ourselves without exception raised:
+ self.assertTrue(got == expected,
+ "ImagePosition(Patient) values not as expected")
+ self.assertEqual(len(ct.PixelData), 128 * 128 * 2,
+ "Pixel data not expected length")
+ # Should also be able to close the file ourselves without
+ # exception raised:
file_like.close()
if __name__ == "__main__":
# This is called if run alone, but not if loaded through run_tests.py
- # If not run from the directory where the sample images are, then need to switch there
+ # If not run from the directory where the sample images are, then need
+ # to switch there
unittest.main()
diff --git a/tests/test_misc.py b/tests/test_misc.py
index 2472e99d6..040938495 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -3,12 +3,13 @@
import unittest
import os.path as osp
-from pydicom.misc import is_dicom
+from pydicom.misc import is_dicom, size_in_bytes
-test_file = osp.join(osp.dirname(osp.abspath(__file__)), 'test_files', 'CT_small.dcm')
+test_file = osp.join(osp.dirname(osp.abspath(__file__)), 'test_files',
+ 'CT_small.dcm')
-class Test_Misc(unittest.TestCase):
+class TestMisc(unittest.TestCase):
def test_is_dicom(self):
"""Test the is_dicom function."""
invalid_file = test_file.replace('CT_', 'CT') # invalid file
@@ -22,3 +23,19 @@ class Test_Misc(unittest.TestCase):
# test invalid path
self.assertRaises(IOError, is_dicom, invalid_file)
+
+ def test_size_in_bytes(self):
+ """Test convenience function size_in_bytes()."""
+ # None or numbers shall be returned unchanged
+ self.assertIsNone(size_in_bytes(None))
+ self.assertEqual(1234, size_in_bytes(1234))
+
+ # string shall be parsed
+ self.assertEqual(1234, size_in_bytes('1234'))
+ self.assertEqual(4096, size_in_bytes('4 kb'))
+ self.assertEqual(0x4000, size_in_bytes('16 KB'))
+ self.assertEqual(0x300000, size_in_bytes('3 MB'))
+ self.assertEqual(0x80000000, size_in_bytes('2gB'))
+
+ self.assertRaises(ValueError, size_in_bytes, '2 TB')
+ self.assertRaises(ValueError, size_in_bytes, 'KB 2')
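As a quick end-to-end sketch of the behaviour these tests lock in (hedged: the path below is just the sample file the test suite ships, and the printed attribute is one the CT tests assert on), a string `defer_size` can now be passed straight to `read_file`:
```
# Hypothetical reproduction of the original report after the fix; the path
# assumes the repository root as working directory.
from pydicom.filereader import read_file

ds = read_file('tests/test_files/CT_small.dcm', defer_size='512 KB')
print(ds.Rows)  # 128 -- deferred elements are read transparently on access
```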
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy>=1.16.0",
"pytest",
"pytest-cov",
"nose",
"nose-timer"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
nose-timer==1.0.1
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@db94409999965965a0e73b53db5d89dfc3707e47#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==6.2
- nose==1.3.7
- nose-timer==1.0.1
- numpy==1.19.5
- pytest-cov==4.0.0
- tomli==1.2.3
prefix: /opt/conda/envs/pydicom
| [
"tests/test_misc.py::TestMisc::test_size_in_bytes"
]
| [
"tests/test_filereader.py::DeferredReadTests::testFileExists",
"tests/test_filereader.py::DeferredReadTests::testTimeCheck",
"tests/test_filereader.py::DeferredReadTests::testValuesIdentical",
"tests/test_filereader.py::DeferredReadTests::testZippedDeferred"
]
| [
"tests/test_filereader.py::ReaderTests::testCT",
"tests/test_filereader.py::ReaderTests::testCTPixelData",
"tests/test_filereader.py::ReaderTests::testDeflate",
"tests/test_filereader.py::ReaderTests::testDir",
"tests/test_filereader.py::ReaderTests::testEmptyNumbersTag",
"tests/test_filereader.py::ReaderTests::testExplicitVRBigEndianNoMeta",
"tests/test_filereader.py::ReaderTests::testExplicitVRLittleEndianNoMeta",
"tests/test_filereader.py::ReaderTests::testMR",
"tests/test_filereader.py::ReaderTests::testNestedPrivateSQ",
"tests/test_filereader.py::ReaderTests::testNoForce",
"tests/test_filereader.py::ReaderTests::testNoMetaGroupLength",
"tests/test_filereader.py::ReaderTests::testNoPixelsRead",
"tests/test_filereader.py::ReaderTests::testNoTransferSyntaxInMeta",
"tests/test_filereader.py::ReaderTests::testPlanarConfig",
"tests/test_filereader.py::ReaderTests::testPrivateSQ",
"tests/test_filereader.py::ReaderTests::testRTDose",
"tests/test_filereader.py::ReaderTests::testRTPlan",
"tests/test_filereader.py::ReaderTests::testRTstruct",
"tests/test_filereader.py::ReaderTests::testUTF8FileName",
"tests/test_filereader.py::ReaderTests::test_commandset_no_dataset",
"tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr",
"tests/test_filereader.py::ReaderTests::test_correct_ambiguous_vr_compressed",
"tests/test_filereader.py::ReaderTests::test_group_length_wrong",
"tests/test_filereader.py::ReaderTests::test_long_specific_char_set",
"tests/test_filereader.py::ReaderTests::test_meta_no_dataset",
"tests/test_filereader.py::ReaderTests::test_no_dataset",
"tests/test_filereader.py::ReaderTests::test_no_preamble_command_group_dataset",
"tests/test_filereader.py::ReaderTests::test_no_preamble_file_meta_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_command_meta_no_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_commandset_no_dataset",
"tests/test_filereader.py::ReaderTests::test_preamble_meta_no_dataset",
"tests/test_filereader.py::ReadDataElementTests::test_read_AE",
"tests/test_filereader.py::ReadDataElementTests::test_read_OD_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OD_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OL_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_OL_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UC_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UC_implicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UR_explicit_little",
"tests/test_filereader.py::ReadDataElementTests::test_read_UR_implicit_little",
"tests/test_filereader.py::JPEG_LS_Tests::testJPEG_LS_PixelArray",
"tests/test_filereader.py::JPEG_LS_Tests::test_emri_JPEG_LS_PixelArray",
"tests/test_filereader.py::BigEndian_Tests::test_big_endian_PixelArray",
"tests/test_filereader.py::JPEG2000Tests::testJPEG2000",
"tests/test_filereader.py::JPEG2000Tests::testJPEG2000PixelArray",
"tests/test_filereader.py::JPEG2000Tests::test_emri_JPEG2000PixelArray",
"tests/test_filereader.py::JPEGlossyTests::testJPEGBaselineColor3DPixelArray",
"tests/test_filereader.py::JPEGlossyTests::testJPEGlossy",
"tests/test_filereader.py::JPEGlossyTests::testJPEGlossyPixelArray",
"tests/test_filereader.py::JPEGlosslessTests::testJPEGlossless",
"tests/test_filereader.py::JPEGlosslessTests::testJPEGlosslessPixelArray",
"tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelData",
"tests/test_filereader.py::ReadTruncatedFileTests::testReadFileWithMissingPixelDataArray",
"tests/test_filereader.py::FileLikeTests::testReadFileGivenFileLikeObject",
"tests/test_filereader.py::FileLikeTests::testReadFileGivenFileObject",
"tests/test_misc.py::TestMisc::test_is_dicom"
]
| []
| MIT License | 1,464 | [
"pydicom/misc.py",
"pydicom/fileutil.py",
"pydicom/util/leanread.py",
"pydicom/filereader.py"
]
| [
"pydicom/misc.py",
"pydicom/fileutil.py",
"pydicom/util/leanread.py",
"pydicom/filereader.py"
]
|
oasis-open__cti-python-stix2-32 | 6f680be8a65028c303bae38bbe1fa0a2d08852a8 | 2017-07-14 19:19:06 | 58f39f80af5cbfe02879c2efa4b3b4ef7a504390 | diff --git a/stix2/__init__.py b/stix2/__init__.py
index 904af9c..18c0b33 100644
--- a/stix2/__init__.py
+++ b/stix2/__init__.py
@@ -5,22 +5,24 @@
from . import exceptions
from .bundle import Bundle
from .observables import (URL, AlternateDataStream, ArchiveExt, Artifact,
- AutonomousSystem, Directory, DomainName,
- EmailAddress, EmailMessage, EmailMIMEComponent, File,
- HTTPRequestExt, ICMPExt, IPv4Address, IPv6Address,
- MACAddress, Mutex, NetworkTraffic, NTFSExt, PDFExt,
- Process, RasterImageExt, SocketExt, Software, TCPExt,
+ AutonomousSystem, CustomObservable, Directory,
+ DomainName, EmailAddress, EmailMessage,
+ EmailMIMEComponent, File, HTTPRequestExt, ICMPExt,
+ IPv4Address, IPv6Address, MACAddress, Mutex,
+ NetworkTraffic, NTFSExt, PDFExt, Process,
+ RasterImageExt, SocketExt, Software, TCPExt,
UNIXAccountExt, UserAccount, WindowsPEBinaryExt,
WindowsPEOptionalHeaderType, WindowsPESection,
WindowsProcessExt, WindowsRegistryKey,
WindowsRegistryValueType, WindowsServiceExt,
- X509Certificate, X509V3ExtenstionsType)
+ X509Certificate, X509V3ExtenstionsType,
+ parse_observable)
from .other import (TLP_AMBER, TLP_GREEN, TLP_RED, TLP_WHITE,
ExternalReference, GranularMarking, KillChainPhase,
MarkingDefinition, StatementMarking, TLPMarking)
-from .sdo import (AttackPattern, Campaign, CourseOfAction, Identity, Indicator,
- IntrusionSet, Malware, ObservedData, Report, ThreatActor,
- Tool, Vulnerability)
+from .sdo import (AttackPattern, Campaign, CourseOfAction, CustomObject,
+ Identity, Indicator, IntrusionSet, Malware, ObservedData,
+ Report, ThreatActor, Tool, Vulnerability)
from .sro import Relationship, Sighting
from .utils import get_dict
from .version import __version__
@@ -43,59 +45,6 @@ OBJ_MAP = {
'vulnerability': Vulnerability,
}
-OBJ_MAP_OBSERVABLE = {
- 'artifact': Artifact,
- 'autonomous-system': AutonomousSystem,
- 'directory': Directory,
- 'domain-name': DomainName,
- 'email-address': EmailAddress,
- 'email-message': EmailMessage,
- 'file': File,
- 'ipv4-addr': IPv4Address,
- 'ipv6-addr': IPv6Address,
- 'mac-addr': MACAddress,
- 'mutex': Mutex,
- 'network-traffic': NetworkTraffic,
- 'process': Process,
- 'software': Software,
- 'url': URL,
- 'user-account': UserAccount,
- 'windows-registry-key': WindowsRegistryKey,
- 'x509-certificate': X509Certificate,
-}
-
-EXT_MAP_FILE = {
- 'archive-ext': ArchiveExt,
- 'ntfs-ext': NTFSExt,
- 'pdf-ext': PDFExt,
- 'raster-image-ext': RasterImageExt,
- 'windows-pebinary-ext': WindowsPEBinaryExt
-}
-
-EXT_MAP_NETWORK_TRAFFIC = {
- 'http-request-ext': HTTPRequestExt,
- 'icmp-ext': ICMPExt,
- 'socket-ext': SocketExt,
- 'tcp-ext': TCPExt,
-}
-
-EXT_MAP_PROCESS = {
- 'windows-process-ext': WindowsProcessExt,
- 'windows-service-ext': WindowsServiceExt,
-}
-
-EXT_MAP_USER_ACCOUNT = {
- 'unix-account-ext': UNIXAccountExt,
-}
-
-EXT_MAP = {
- 'file': EXT_MAP_FILE,
- 'network-traffic': EXT_MAP_NETWORK_TRAFFIC,
- 'process': EXT_MAP_PROCESS,
- 'user-account': EXT_MAP_USER_ACCOUNT,
-
-}
-
def parse(data, allow_custom=False):
"""Deserialize a string or file-like object into a STIX object.
@@ -120,47 +69,8 @@ def parse(data, allow_custom=False):
return obj_class(allow_custom=allow_custom, **obj)
-def parse_observable(data, _valid_refs=[], allow_custom=False):
- """Deserialize a string or file-like object into a STIX Cyber Observable object.
-
- Args:
- data: The STIX 2 string to be parsed.
- _valid_refs: A list of object references valid for the scope of the object being parsed.
- allow_custom: Whether to allow custom properties or not. Default: False.
-
- Returns:
- An instantiated Python STIX Cyber Observable object.
- """
-
- obj = get_dict(data)
- obj['_valid_refs'] = _valid_refs
-
- if 'type' not in obj:
- raise exceptions.ParseError("Can't parse object with no 'type' property: %s" % str(obj))
- try:
- obj_class = OBJ_MAP_OBSERVABLE[obj['type']]
- except KeyError:
- raise exceptions.ParseError("Can't parse unknown object type '%s'! For custom observables, use the CustomObservable decorator." % obj['type'])
-
- if 'extensions' in obj and obj['type'] in EXT_MAP:
- for name, ext in obj['extensions'].items():
- if name not in EXT_MAP[obj['type']]:
- raise exceptions.ParseError("Can't parse Unknown extension type '%s' for object type '%s'!" % (name, obj['type']))
- ext_class = EXT_MAP[obj['type']][name]
- obj['extensions'][name] = ext_class(allow_custom=allow_custom, **obj['extensions'][name])
-
- return obj_class(allow_custom=allow_custom, **obj)
-
-
def _register_type(new_type):
"""Register a custom STIX Object type.
"""
OBJ_MAP[new_type._type] = new_type
-
-
-def _register_observable(new_observable):
- """Register a custom STIX Cyber Observable type.
- """
-
- OBJ_MAP_OBSERVABLE[new_observable._type] = new_observable
diff --git a/stix2/observables.py b/stix2/observables.py
index a8f3b67..e38e298 100644
--- a/stix2/observables.py
+++ b/stix2/observables.py
@@ -5,16 +5,72 @@ embedded in Email Message objects, inherit from _STIXBase instead of Observable
and do not have a '_type' attribute.
"""
-import stix2
-
from .base import _Extension, _Observable, _STIXBase
-from .exceptions import AtLeastOnePropertyError, DependentPropertiesError
+from .exceptions import (AtLeastOnePropertyError, DependentPropertiesError,
+ ParseError)
from .properties import (BinaryProperty, BooleanProperty, DictionaryProperty,
- EmbeddedObjectProperty, EnumProperty,
- ExtensionsProperty, FloatProperty, HashesProperty,
- HexProperty, IntegerProperty, ListProperty,
- ObjectReferenceProperty, StringProperty,
- TimestampProperty, TypeProperty)
+ EmbeddedObjectProperty, EnumProperty, FloatProperty,
+ HashesProperty, HexProperty, IntegerProperty,
+ ListProperty, ObjectReferenceProperty, Property,
+ StringProperty, TimestampProperty, TypeProperty)
+from .utils import get_dict
+
+
+class ObservableProperty(Property):
+
+ def clean(self, value):
+ try:
+ dictified = get_dict(value)
+ except ValueError:
+ raise ValueError("The observable property must contain a dictionary")
+ if dictified == {}:
+ raise ValueError("The dictionary property must contain a non-empty dictionary")
+
+ valid_refs = dict((k, v['type']) for (k, v) in dictified.items())
+
+ # from .__init__ import parse_observable # avoid circular import
+ for key, obj in dictified.items():
+ parsed_obj = parse_observable(obj, valid_refs)
+ if not issubclass(type(parsed_obj), _Observable):
+ raise ValueError("Objects in an observable property must be "
+ "Cyber Observable Objects")
+ dictified[key] = parsed_obj
+
+ return dictified
+
+
+class ExtensionsProperty(DictionaryProperty):
+ """ Property for representing extensions on Observable objects
+ """
+
+ def __init__(self, enclosing_type=None, required=False):
+ self.enclosing_type = enclosing_type
+ super(ExtensionsProperty, self).__init__(required)
+
+ def clean(self, value):
+ try:
+ dictified = get_dict(value)
+ except ValueError:
+ raise ValueError("The extensions property must contain a dictionary")
+ if dictified == {}:
+ raise ValueError("The dictionary property must contain a non-empty dictionary")
+
+ if self.enclosing_type in EXT_MAP:
+ specific_type_map = EXT_MAP[self.enclosing_type]
+ for key, subvalue in dictified.items():
+ if key in specific_type_map:
+ cls = specific_type_map[key]
+ if type(subvalue) is dict:
+ dictified[key] = cls(**subvalue)
+ elif type(subvalue) is cls:
+ dictified[key] = subvalue
+ else:
+ raise ValueError("Cannot determine extension type.")
+ else:
+ raise ValueError("The key used in the extensions dictionary is not an extension type name")
+ else:
+ raise ValueError("The enclosing type has no extensions defined")
+ return dictified
class Artifact(_Observable):
@@ -590,9 +646,101 @@ class X509Certificate(_Observable):
}
+OBJ_MAP_OBSERVABLE = {
+ 'artifact': Artifact,
+ 'autonomous-system': AutonomousSystem,
+ 'directory': Directory,
+ 'domain-name': DomainName,
+ 'email-address': EmailAddress,
+ 'email-message': EmailMessage,
+ 'file': File,
+ 'ipv4-addr': IPv4Address,
+ 'ipv6-addr': IPv6Address,
+ 'mac-addr': MACAddress,
+ 'mutex': Mutex,
+ 'network-traffic': NetworkTraffic,
+ 'process': Process,
+ 'software': Software,
+ 'url': URL,
+ 'user-account': UserAccount,
+ 'windows-registry-key': WindowsRegistryKey,
+ 'x509-certificate': X509Certificate,
+}
+
+EXT_MAP_FILE = {
+ 'archive-ext': ArchiveExt,
+ 'ntfs-ext': NTFSExt,
+ 'pdf-ext': PDFExt,
+ 'raster-image-ext': RasterImageExt,
+ 'windows-pebinary-ext': WindowsPEBinaryExt
+}
+
+EXT_MAP_NETWORK_TRAFFIC = {
+ 'http-request-ext': HTTPRequestExt,
+ 'icmp-ext': ICMPExt,
+ 'socket-ext': SocketExt,
+ 'tcp-ext': TCPExt,
+}
+
+EXT_MAP_PROCESS = {
+ 'windows-process-ext': WindowsProcessExt,
+ 'windows-service-ext': WindowsServiceExt,
+}
+
+EXT_MAP_USER_ACCOUNT = {
+ 'unix-account-ext': UNIXAccountExt,
+}
+
+EXT_MAP = {
+ 'file': EXT_MAP_FILE,
+ 'network-traffic': EXT_MAP_NETWORK_TRAFFIC,
+ 'process': EXT_MAP_PROCESS,
+ 'user-account': EXT_MAP_USER_ACCOUNT,
+
+}
+
+
+def parse_observable(data, _valid_refs=[], allow_custom=False):
+ """Deserialize a string or file-like object into a STIX Cyber Observable object.
+
+ Args:
+ data: The STIX 2 string to be parsed.
+ _valid_refs: A list of object references valid for the scope of the object being parsed.
+ allow_custom: Whether to allow custom properties or not. Default: False.
+
+ Returns:
+ An instantiated Python STIX Cyber Observable object.
+ """
+
+ obj = get_dict(data)
+ obj['_valid_refs'] = _valid_refs
+
+ if 'type' not in obj:
+ raise ParseError("Can't parse object with no 'type' property: %s" % str(obj))
+ try:
+ obj_class = OBJ_MAP_OBSERVABLE[obj['type']]
+ except KeyError:
+ raise ParseError("Can't parse unknown object type '%s'! For custom observables, use the CustomObservable decorator." % obj['type'])
+
+ if 'extensions' in obj and obj['type'] in EXT_MAP:
+ for name, ext in obj['extensions'].items():
+ if name not in EXT_MAP[obj['type']]:
+ raise ParseError("Can't parse Unknown extension type '%s' for object type '%s'!" % (name, obj['type']))
+ ext_class = EXT_MAP[obj['type']][name]
+ obj['extensions'][name] = ext_class(allow_custom=allow_custom, **obj['extensions'][name])
+
+ return obj_class(allow_custom=allow_custom, **obj)
+
+
+def _register_observable(new_observable):
+ """Register a custom STIX Cyber Observable type.
+ """
+
+ OBJ_MAP_OBSERVABLE[new_observable._type] = new_observable
+
+
def CustomObservable(type='x-custom-observable', properties={}):
"""Custom STIX Cyber Observable type decorator
-
"""
def custom_builder(cls):
@@ -608,7 +756,7 @@ def CustomObservable(type='x-custom-observable', properties={}):
_Observable.__init__(self, **kwargs)
cls.__init__(self, **kwargs)
- stix2._register_observable(_Custom)
+ _register_observable(_Custom)
return _Custom
return custom_builder
diff --git a/stix2/properties.py b/stix2/properties.py
index 80e5345..35c239a 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -7,7 +7,7 @@ import uuid
from six import text_type
-from .base import _Observable, _STIXBase
+from .base import _STIXBase
from .exceptions import DictionaryKeyError
from .utils import get_dict, parse_into_datetime
@@ -220,29 +220,6 @@ class TimestampProperty(Property):
return parse_into_datetime(value, self.precision)
-class ObservableProperty(Property):
-
- def clean(self, value):
- try:
- dictified = get_dict(value)
- except ValueError:
- raise ValueError("The observable property must contain a dictionary")
- if dictified == {}:
- raise ValueError("The dictionary property must contain a non-empty dictionary")
-
- valid_refs = dict((k, v['type']) for (k, v) in dictified.items())
-
- from .__init__ import parse_observable # avoid circular import
- for key, obj in dictified.items():
- parsed_obj = parse_observable(obj, valid_refs)
- if not issubclass(type(parsed_obj), _Observable):
- raise ValueError("Objects in an observable property must be "
- "Cyber Observable Objects")
- dictified[key] = parsed_obj
-
- return dictified
-
-
class DictionaryProperty(Property):
def clean(self, value):
@@ -393,35 +370,3 @@ class EnumProperty(StringProperty):
if value not in self.allowed:
raise ValueError("value '%s' is not valid for this enumeration." % value)
return self.string_type(value)
-
-
-class ExtensionsProperty(DictionaryProperty):
- def __init__(self, enclosing_type=None, required=False):
- self.enclosing_type = enclosing_type
- super(ExtensionsProperty, self).__init__(required)
-
- def clean(self, value):
- try:
- dictified = get_dict(value)
- except ValueError:
- raise ValueError("The extensions property must contain a dictionary")
- if dictified == {}:
- raise ValueError("The dictionary property must contain a non-empty dictionary")
-
- from .__init__ import EXT_MAP # avoid circular import
- if self.enclosing_type in EXT_MAP:
- specific_type_map = EXT_MAP[self.enclosing_type]
- for key, subvalue in dictified.items():
- if key in specific_type_map:
- cls = specific_type_map[key]
- if type(subvalue) is dict:
- dictified[key] = cls(**subvalue)
- elif type(subvalue) is cls:
- dictified[key] = subvalue
- else:
- raise ValueError("Cannot determine extension type.")
- else:
- raise ValueError("The key used in the extensions dictionary is not an extension type name")
- else:
- raise ValueError("The enclosing type has no extensions defined")
- return dictified
diff --git a/stix2/sdo.py b/stix2/sdo.py
index 1ec3b21..8115b9d 100644
--- a/stix2/sdo.py
+++ b/stix2/sdo.py
@@ -4,10 +4,11 @@ import stix2
from .base import _STIXBase
from .common import COMMON_PROPERTIES
+from .observables import ObservableProperty
from .other import KillChainPhase
from .properties import (IDProperty, IntegerProperty, ListProperty,
- ObservableProperty, ReferenceProperty, StringProperty,
- TimestampProperty, TypeProperty)
+ ReferenceProperty, StringProperty, TimestampProperty,
+ TypeProperty)
from .utils import NOW
diff --git a/tox.ini b/tox.ini
index 69ae434..b1265ec 100644
--- a/tox.ini
+++ b/tox.ini
@@ -30,7 +30,9 @@ max-line-length=160
[testenv:isort-check]
deps = isort
-commands = isort -rc stix2 examples -c -df
+commands =
+ isort -rc stix2 examples -df
+ isort -rc stix2 examples -c
[travis]
python =
| Can't add custom observables to Observed Data object | oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py
index 1a816bd..60e982c 100644
--- a/stix2/test/test_custom.py
+++ b/stix2/test/test_custom.py
@@ -2,6 +2,8 @@ import pytest
import stix2
+from .constants import FAKE_TIME
+
def test_identity_custom_property():
with pytest.raises(ValueError):
@@ -166,3 +168,15 @@ def test_observable_custom_property_allowed():
allow_custom=True,
)
assert no.x_foo == "bar"
+
+
+def test_observed_data_with_custom_observable_object():
+ no = NewObservable(property1='something')
+ ob_data = stix2.ObservedData(
+ first_observed=FAKE_TIME,
+ last_observed=FAKE_TIME,
+ number_observed=1,
+ objects={'0': no},
+ allow_custom=True,
+ )
+ assert ob_data.objects['0'].property1 == 'something'
diff --git a/stix2/test/test_properties.py b/stix2/test/test_properties.py
index 5395a9f..01daebf 100644
--- a/stix2/test/test_properties.py
+++ b/stix2/test/test_properties.py
@@ -2,13 +2,13 @@ import pytest
from stix2 import TCPExt
from stix2.exceptions import AtLeastOnePropertyError, DictionaryKeyError
-from stix2.observables import EmailMIMEComponent
+from stix2.observables import EmailMIMEComponent, ExtensionsProperty
from stix2.properties import (BinaryProperty, BooleanProperty,
DictionaryProperty, EmbeddedObjectProperty,
- EnumProperty, ExtensionsProperty, HashesProperty,
- HexProperty, IDProperty, IntegerProperty,
- ListProperty, Property, ReferenceProperty,
- StringProperty, TimestampProperty, TypeProperty)
+ EnumProperty, HashesProperty, HexProperty,
+ IDProperty, IntegerProperty, ListProperty,
+ Property, ReferenceProperty, StringProperty,
+ TimestampProperty, TypeProperty)
from .constants import FAKE_TIME
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 5
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
bump2version==1.0.1
bumpversion==0.6.0
cachetools==5.5.2
certifi==2025.1.31
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-prompt==1.8.0
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
-e git+https://github.com/oasis-open/cti-python-stix2.git@6f680be8a65028c303bae38bbe1fa0a2d08852a8#egg=stix2
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cachetools==5.5.2
- certifi==2025.1.31
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-prompt==1.8.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_custom.py::test_observed_data_with_custom_observable_object"
]
| [
"stix2/test/test_properties.py::test_hashes_property_valid[value0]"
]
| [
"stix2/test/test_custom.py::test_identity_custom_property",
"stix2/test/test_custom.py::test_identity_custom_property_invalid",
"stix2/test/test_custom.py::test_identity_custom_property_allowed",
"stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n",
"stix2/test/test_custom.py::test_custom_object_type",
"stix2/test/test_custom.py::test_parse_custom_object_type",
"stix2/test/test_custom.py::test_custom_observable_object",
"stix2/test/test_custom.py::test_parse_custom_observable_object",
"stix2/test/test_custom.py::test_observable_custom_property",
"stix2/test/test_custom.py::test_observable_custom_property_invalid",
"stix2/test/test_custom.py::test_observable_custom_property_allowed",
"stix2/test/test_properties.py::test_property",
"stix2/test/test_properties.py::test_basic_clean",
"stix2/test/test_properties.py::test_property_default",
"stix2/test/test_properties.py::test_fixed_property",
"stix2/test/test_properties.py::test_list_property",
"stix2/test/test_properties.py::test_string_property",
"stix2/test/test_properties.py::test_type_property",
"stix2/test/test_properties.py::test_id_property",
"stix2/test/test_properties.py::test_integer_property_valid[2]",
"stix2/test/test_properties.py::test_integer_property_valid[-1]",
"stix2/test/test_properties.py::test_integer_property_valid[3.14]",
"stix2/test/test_properties.py::test_integer_property_valid[False]",
"stix2/test/test_properties.py::test_integer_property_invalid[something]",
"stix2/test/test_properties.py::test_integer_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_valid[True0]",
"stix2/test/test_properties.py::test_boolean_property_valid[False0]",
"stix2/test/test_properties.py::test_boolean_property_valid[True1]",
"stix2/test/test_properties.py::test_boolean_property_valid[False1]",
"stix2/test/test_properties.py::test_boolean_property_valid[true]",
"stix2/test/test_properties.py::test_boolean_property_valid[false]",
"stix2/test/test_properties.py::test_boolean_property_valid[TRUE]",
"stix2/test/test_properties.py::test_boolean_property_valid[FALSE]",
"stix2/test/test_properties.py::test_boolean_property_valid[T]",
"stix2/test/test_properties.py::test_boolean_property_valid[F]",
"stix2/test/test_properties.py::test_boolean_property_valid[t]",
"stix2/test/test_properties.py::test_boolean_property_valid[f]",
"stix2/test/test_properties.py::test_boolean_property_valid[1]",
"stix2/test/test_properties.py::test_boolean_property_valid[0]",
"stix2/test/test_properties.py::test_boolean_property_invalid[abc]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[-1]",
"stix2/test/test_properties.py::test_reference_property",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01T12:34:56Z]",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01",
"stix2/test/test_properties.py::test_timestamp_property_valid[Jan",
"stix2/test/test_properties.py::test_timestamp_property_invalid",
"stix2/test/test_properties.py::test_binary_property",
"stix2/test/test_properties.py::test_hex_property",
"stix2/test/test_properties.py::test_dictionary_property_valid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_valid[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d2]",
"stix2/test/test_properties.py::test_hashes_property_valid[value1]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value0]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value1]",
"stix2/test/test_properties.py::test_embedded_property",
"stix2/test/test_properties.py::test_enum_property",
"stix2/test/test_properties.py::test_extension_property_valid",
"stix2/test/test_properties.py::test_extension_property_invalid[1]",
"stix2/test/test_properties.py::test_extension_property_invalid[data1]",
"stix2/test/test_properties.py::test_extension_property_invalid_type",
"stix2/test/test_properties.py::test_extension_at_least_one_property_constraint"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,465 | [
"tox.ini",
"stix2/properties.py",
"stix2/__init__.py",
"stix2/sdo.py",
"stix2/observables.py"
]
| [
"tox.ini",
"stix2/properties.py",
"stix2/__init__.py",
"stix2/sdo.py",
"stix2/observables.py"
]
|
|
ethanrowe__python-mandrel-18 | 2d1d0d4646145b7af1bc5658902d247fcf85cf6a | 2017-07-14 21:30:10 | 2d1d0d4646145b7af1bc5658902d247fcf85cf6a | diff --git a/.travis.yml b/.travis.yml
index d81b6d1..edf9bd2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,4 +2,7 @@ language: python
python:
- "2.6"
- "2.7"
-script: python setup.py test
+ - "3.6"
+script:
+- python setup.py test
+
diff --git a/changelog.md b/changelog.md
index 7a665bc..4a85dcd 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,4 +1,8 @@
-2.0.0
+0.3.0
+
+* Python 3 support
+
+0.2.0
* mandrel-runner uses return value of callable as the exit code.
diff --git a/mandrel/bootstrap.py b/mandrel/bootstrap.py
index f405605..45a2cfa 100644
--- a/mandrel/bootstrap.py
+++ b/mandrel/bootstrap.py
@@ -47,7 +47,7 @@ def find_logging_configuration():
"""
for path in util.find_files(LOGGING_CONFIG_BASENAME, SEARCH_PATHS, matches=1):
return path
- raise exception.UnknownConfigurationException, "Cannot find logging configuration file(s) '%s'" % LOGGING_CONFIG_BASENAME
+ raise exception.UnknownConfigurationException("Cannot find logging configuration file(s) '%s'" % LOGGING_CONFIG_BASENAME)
DEFAULT_LOGGING_CALLBACK = initialize_simple_logging
DISABLE_EXISTING_LOGGERS = True
@@ -109,7 +109,7 @@ def _find_bootstrap_base():
while not os.path.isfile(os.path.join(current, __BOOTSTRAP_BASENAME)):
parent = os.path.dirname(current)
if parent == current:
- raise exception.MissingBootstrapException, 'Cannot find %s file in directory hierarchy' % __BOOTSTRAP_BASENAME
+ raise exception.MissingBootstrapException('Cannot find %s file in directory hierarchy' % __BOOTSTRAP_BASENAME)
current = parent
return current, os.path.join(current, __BOOTSTRAP_BASENAME)
diff --git a/mandrel/config/core.py b/mandrel/config/core.py
index c75ca36..d2bb7ed 100644
--- a/mandrel/config/core.py
+++ b/mandrel/config/core.py
@@ -81,7 +81,7 @@ def find_configuration_file(name):
"""
for path in find_configuration_files(name):
return path
- raise exception.UnknownConfigurationException, "No configuration file found for name '%s'" % name
+ raise exception.UnknownConfigurationException("No configuration file found for name '%s'" % name)
def get_loader(path):
"""Gets the configuration loader for path according to file extension.
@@ -98,7 +98,7 @@ def get_loader(path):
fullext = '.' + ext
if path[-len(fullext):] == fullext:
return loader
- raise exception.UnknownConfigurationException, "No configuration loader found for path '%s'" % path
+ raise exception.UnknownConfigurationException("No configuration loader found for path '%s'" % path)
def load_configuration_file(path):
"""Loads the configuration at path and returns it.
@@ -267,7 +267,7 @@ class Configuration(object):
except AttributeError:
pass
- raise AttributeError, 'No such attribute: %s' % attribute
+ raise AttributeError('No such attribute: %s' % attribute)
def __getattr__(self, attr):
return self.chained_get(attr)
diff --git a/mandrel/runner.py b/mandrel/runner.py
index 8a01ccf..5349ebd 100644
--- a/mandrel/runner.py
+++ b/mandrel/runner.py
@@ -109,7 +109,12 @@ class ScriptRunner(AbstractRunner):
def execute_script(self, script):
glb = globals()
glb.update(__name__='__main__', __file__=script)
- return execfile(script, glb)
+ if sys.version_info[0] > 2:
+ import builtins
+ execf = getattr(builtins, 'exec')
+ return execf(compile(open(script).read(), script, 'exec'), glb)
+ else:
+ return execfile(script, glb)
def execute(self, target, args):
self.prepare_environment(args)
diff --git a/mandrel/util.py b/mandrel/util.py
index fbc17e0..4fe9bbc 100644
--- a/mandrel/util.py
+++ b/mandrel/util.py
@@ -1,5 +1,13 @@
import os
import re
+import sys
+
+
+if sys.version_info[0] == 2:
+ string_type = basestring
+else:
+ string_type = str
+
class TransformingList(object):
__slots__ = ('_list', '_transformer')
@@ -9,7 +17,10 @@ class TransformingList(object):
self._transformer = transformer
def __setitem__(self, i, y):
- self._list[i] = self._transformer(y)
+ if isinstance(i, slice):
+ self._list[i] = (self._transformer(v) for v in y)
+ else:
+ self._list[i] = self._transformer(y)
def __setslice__(self, i, j, y):
self._list[i:j] = (self._transformer(v) for v in y)
@@ -72,7 +83,7 @@ def find_files(name_or_names, paths, matches=None):
yielded value is the full path to a matching file.
"""
- if isinstance(name_or_names, basestring):
+ if isinstance(name_or_names, string_type):
name_or_names = [name_or_names]
if matches is None:
@@ -138,9 +149,9 @@ def convention_loader(format_string):
not contain one and only one '%s' within it.
"""
if not format_string:
- raise TypeError, 'format_string cannot be blank'
+ raise TypeError('format_string cannot be blank')
if re.findall('%.', format_string) != ['%s']:
- raise TypeError, 'format_string must contain one and only one "%s" token'
+ raise TypeError('format_string must contain one and only one "%s" token')
def func(name):
return get_by_fqn(format_string % name)
diff --git a/setup.py b/setup.py
index 791c3b5..4cea793 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@ import os
setup(
name = "mandrel",
- version = "0.2.0",
+ version = "0.3.0",
author = "Ethan Rowe",
author_email = "[email protected]",
description = ("Provides bootstrapping for sane configuration management"),
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..fbbf9e5
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,12 @@
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py26,py27,py36
+
+[testenv]
+commands = {envpython} setup.py test
+deps =
+
| Python 3 support
I'd like to port treehouse, which uses Mandrel, to python 3, but ran into problems because Mandrel isn't python 3 compatible. | ethanrowe/python-mandrel | diff --git a/mandrel/test/config/configuration_class_test.py b/mandrel/test/config/configuration_class_test.py
index 7294f72..519ecc1 100644
--- a/mandrel/test/config/configuration_class_test.py
+++ b/mandrel/test/config/configuration_class_test.py
@@ -1,8 +1,9 @@
-import unittest
import mock
+
import mandrel.config
-from mandrel.test import utils
from mandrel import exception
+from mandrel.test import utils
+
class TestConfigurationClass(utils.TestCase):
@mock.patch('mandrel.config.core.get_configuration')
@@ -21,7 +22,7 @@ class TestConfigurationClass(utils.TestCase):
result = mandrel.config.core.Configuration.get_logger_name('some.nested.name')
self.assertEqual(mock_name + '.some.nested.name', result)
- @mock.patch('mandrel.bootstrap')
+ @mock.patch('mandrel.bootstrap', create=True)
@mock.patch('mandrel.config.core.Configuration.get_logger_name')
def testGetLogger(self, get_logger_name, bootstrap):
mock_name = str(mock.Mock(name='AnotherMockConfigurationName'))
@@ -45,7 +46,7 @@ class TestConfigurationClass(utils.TestCase):
self.assertEqual(config, c.configuration)
self.assertEqual((), c.chain)
- chain = tuple(mock.Mock(name='Chain%d' % x) for x in xrange(3))
+ chain = tuple(mock.Mock(name='Chain%d' % x) for x in range(3))
c = mandrel.config.core.Configuration(config, *chain)
self.assertEqual(config, c.configuration)
self.assertEqual(chain, c.chain)
@@ -136,7 +137,7 @@ class TestConfigurationClass(utils.TestCase):
self.assertEqual(loader.return_value, c.configuration)
self.assertEqual((), c.chain)
- chain = tuple(mock.Mock() for x in xrange(5))
+ chain = tuple(mock.Mock() for x in range(5))
c = mandrel.config.core.Configuration.get_configuration(*chain)
self.assertEqual(loader.return_value, c.configuration)
self.assertEqual(chain, c.chain)
diff --git a/mandrel/test/config/loader_functionality_test.py b/mandrel/test/config/loader_functionality_test.py
index e2ab975..b88ceae 100644
--- a/mandrel/test/config/loader_functionality_test.py
+++ b/mandrel/test/config/loader_functionality_test.py
@@ -1,13 +1,20 @@
-import contextlib
import unittest
+
import mock
+
import mandrel
from mandrel import exception
-from mandrel.test import utils
+
+try:
+ # python 3 compatibility
+ from importlib import reload
+except ImportError:
+ pass
+
def scenario(func):
def wrapper(*a, **kw):
- with mock.patch('mandrel.bootstrap') as bootstrap:
+ with mock.patch('mandrel.bootstrap', create=True) as bootstrap:
if hasattr(mandrel, 'config'):
reload(mandrel.config.core)
reload(mandrel.config)
@@ -48,7 +55,7 @@ class TestConfigLoaderFunctionality(unittest.TestCase):
def testFindConfigurationFiles(self):
with mock.patch('mandrel.util.find_files') as find_files:
with mock.patch('mandrel.config.core.get_possible_basenames') as get_possible_basenames:
- exts = [mock.Mock(name='Extension%d' % x) for x in xrange(3)]
+ exts = [mock.Mock(name='Extension%d' % x) for x in range(3)]
mandrel.config.core.LOADERS = [(ext, mock.Mock(name='Reader')) for ext in exts]
mandrel.bootstrap.SEARCH_PATHS = mock.Mock()
name = mock.Mock(name='FileBase')
@@ -60,7 +67,7 @@ class TestConfigLoaderFunctionality(unittest.TestCase):
@scenario
def testFindConfigurationFileWithMatch(self):
with mock.patch('mandrel.config.core.find_configuration_files') as find_configuration_files:
- paths = [mock.Mock(name='Path%d' % x) for x in xrange(10)]
+ paths = [mock.Mock(name='Path%d' % x) for x in range(10)]
find_configuration_files.side_effect = lambda x: iter(paths)
name = mock.Mock(name='SomeBase')
result = mandrel.config.core.find_configuration_file(name)
@@ -76,11 +83,11 @@ class TestConfigLoaderFunctionality(unittest.TestCase):
@scenario
def testGetLoader(self):
- exts = [str(mock.Mock(name='Extension%d' % x)) for x in xrange(3)]
- loaders = [mock.Mock(name='Loader%d' % x) for x in xrange(len(exts))]
- mandrel.config.core.LOADERS = zip(exts, loaders)
+ exts = [str(mock.Mock(name='Extension%d' % x)) for x in range(3)]
+ loaders = [mock.Mock(name='Loader%d' % x) for x in range(len(exts))]
+ mandrel.config.core.LOADERS = list(zip(exts, loaders))
- for i in xrange(len(exts)):
+ for i in range(len(exts)):
ext, loader = mandrel.config.core.LOADERS[i]
path = '%s.%s' % (mock.Mock(name='someFile'), ext)
result = mandrel.config.core.get_loader(path)
diff --git a/mandrel/test/runner_test.py b/mandrel/test/runner_test.py
index 9a809ec..a84b305 100644
--- a/mandrel/test/runner_test.py
+++ b/mandrel/test/runner_test.py
@@ -31,6 +31,12 @@ def scenario(*opts, **driver_opt):
KNOWN_PATH = os.path.realpath('')
+if sys.version_info[0] > 2:
+ builtin_name = 'builtins'
+else:
+ builtin_name = '__builtin__'
+
+
class TestRunner(unittest.TestCase):
@scenario('-s', 'foo:bar:bah:', 'gloof', 'glof', 'floo', ensure_target=False)
def testSearchPathSet(self, path):
@@ -74,7 +80,7 @@ class TestRunner(unittest.TestCase):
@mock.patch.object(runner.AbstractRunner, 'process_options')
@mock.patch.object(runner.AbstractRunner, 'execute')
def testOrderOfOperations(self, path, execute, process_options):
- mocks = [mock.Mock('positional%d' % x) for x in xrange(4)]
+ mocks = [mock.Mock('positional%d' % x) for x in range(4)]
target = mocks.pop(0)
process_options.return_value = (target, mocks)
runner.AbstractRunner().run()
@@ -90,7 +96,7 @@ class TestRunner(unittest.TestCase):
@scenario()
def testImporting(self, _):
obj = runner.AbstractRunner()
- with mock.patch('__builtin__.__import__') as importer:
+ with mock.patch(builtin_name + '.__import__') as importer:
self.assertEqual(importer.return_value.bootstrap, obj.bootstrapper)
importer.assert_called_once_with('mandrel.bootstrap')
@@ -102,18 +108,18 @@ class TestRunner(unittest.TestCase):
@mock.patch('mandrel.util.get_by_fqn')
def testCallableRunner(self, get_by_fqn):
target = str(mock.Mock(name='MockTarget'))
- opts = [str(mock.Mock(name='Arg%d' % x)) for x in xrange(3)]
+ opts = [str(mock.Mock(name='Arg%d' % x)) for x in range(3)]
result = runner.CallableRunner().execute(target, opts)
get_by_fqn.assert_called_once_with(target)
get_by_fqn.return_value.assert_called_once_with(opts)
self.assertEqual(get_by_fqn.return_value.return_value, result)
- @mock.patch('__builtin__.globals')
+ @mock.patch(builtin_name + '.globals')
@mock.patch('sys.argv', new=['foo', 'bar', 'bah'])
- @mock.patch('__builtin__.execfile')
+ @mock.patch('mandrel.runner.ScriptRunner.execute_script')
def testScriptRunner(self, mock_exec, mock_globals):
target = str(mock.Mock(name='MockTarget'))
- opts = [str(mock.Mock(name='Arg%d' % x)) for x in xrange(3)]
+ opts = [str(mock.Mock(name='Arg%d' % x)) for x in range(3)]
glb = {'foo': mock.Mock(), 'bar': mock.Mock(), '__file__': mock.Mock(), '__name__': mock.Mock()}
mock_globals.side_effect = lambda: dict(glb)
exp = dict(glb)
@@ -121,7 +127,7 @@ class TestRunner(unittest.TestCase):
exp['__name__'] = '__main__'
result = runner.ScriptRunner().execute(target, opts)
- mock_exec.assert_called_once_with(target, exp)
+ mock_exec.assert_called_once_with(target)
self.assertEqual(mock_exec.return_value, result)
# Should add args at sys.argv[1:]
self.assertEqual(['foo'] + opts, sys.argv)
diff --git a/mandrel/test/util/file_finder_test.py b/mandrel/test/util/file_finder_test.py
index 47f03de..5bb6065 100644
--- a/mandrel/test/util/file_finder_test.py
+++ b/mandrel/test/util/file_finder_test.py
@@ -14,7 +14,7 @@ def scenario(**files_to_levels):
levels.append(b)
with utils.tempdir() as c:
levels.append(c)
- for name, dirs in files_to_levels.items():
+ for name, dirs in list(files_to_levels.items()):
for level in dirs:
with open(os.path.join(levels[level], name), 'w') as f:
f.write(str(level))
@@ -32,21 +32,21 @@ def get_level(path):
class TestFileFinder(unittest.TestCase):
def testSingleFindOneMatch(self):
with scenario(**{'a.txt': (0, 1, 2), 'b.foo': (1, 2), 'c.bar': (2,)}) as dirs:
- for name, level in {'a.txt': 0, 'b.foo': 1, 'c.bar': 2}.items():
+ for name, level in list({'a.txt': 0, 'b.foo': 1, 'c.bar': 2}.items()):
result = tuple(util.find_files(name, dirs, matches=1))
self.assertEqual(1, len(result))
self.assertEqual(level, get_level(result[0]))
def testSingleFindTwoMatch(self):
with scenario(**{'0.x': (0,), 'a.txt': (0, 1, 2), 'b.foo': (1, 2), 'c.bar': (2,)}) as dirs:
- for name, levels in {'0.x': (0,), 'a.txt': (0, 1), 'b.foo': (1, 2), 'c.bar': (2,)}.items():
+ for name, levels in list({'0.x': (0,), 'a.txt': (0, 1), 'b.foo': (1, 2), 'c.bar': (2,)}.items()):
got = tuple(get_level(r) for r in util.find_files(name, dirs, matches=2))
self.assertEqual(levels, got)
def testSingleFindMultiMatch(self):
mapping = {'0.x': (0,), 'a.txt': (0, 1), 'b.blah': (0, 1, 2), 'c.pork': (1, 2), 'd.plonk': (1,), 'e.sporks': (2,)}
with scenario(**mapping) as dirs:
- for name, levels in mapping.items():
+ for name, levels in list(mapping.items()):
got = tuple(get_level(r) for r in util.find_files(name, dirs))
self.assertEqual(levels, got)
diff --git a/mandrel/test/util/loaders_test.py b/mandrel/test/util/loaders_test.py
index 698478d..e5b412f 100644
--- a/mandrel/test/util/loaders_test.py
+++ b/mandrel/test/util/loaders_test.py
@@ -30,7 +30,7 @@ class TestLoaderFunctions(unittest.TestCase):
callback = mock.Mock(name='Callback')
harness = util.harness_loader(loader)(callback)
name = mock.Mock(name='Plugin')
- args = [mock.Mock(name='Arg%d' % x) for x in xrange(3)]
+ args = [mock.Mock(name='Arg%d' % x) for x in range(3)]
result = harness(name)
loader.assert_called_once_with(name)
diff --git a/mandrel/test/util/transforming_list_test.py b/mandrel/test/util/transforming_list_test.py
index 2f6e92c..abe9e38 100644
--- a/mandrel/test/util/transforming_list_test.py
+++ b/mandrel/test/util/transforming_list_test.py
@@ -6,14 +6,14 @@ from mandrel import util
_trans_count = itertools.count(0)
def mock_transform():
- t = mock.Mock(name='MockTransform%d' % _trans_count.next())
+ t = mock.Mock(name='MockTransform%d' % next(_trans_count))
t.side_effect = lambda v: v.transformed
return t
_vals_count = itertools.count(0)
def mock_value():
- return mock.Mock(name='MockValue%d' % _vals_count.next())
+ return mock.Mock(name='MockValue%d' % next(_vals_count))
class TestTransformingList(unittest.TestCase):
def testAppendBasics(self):
@@ -33,7 +33,7 @@ class TestTransformingList(unittest.TestCase):
def testAccessBasics(self):
t = mock_transform()
l = util.TransformingList(t)
- vals = [mock_value() for i in xrange(5)]
+ vals = [mock_value() for i in range(5)]
l[0:3] = vals[0:3]
self.assertEqual(3, len(l))
self.assertEqual(tuple(v.transformed for v in vals[0:3]), tuple(l))
@@ -51,7 +51,7 @@ class TestTransformingList(unittest.TestCase):
def testExtend(self):
t = mock_transform()
- vals = [mock_value() for i in xrange(5)]
+ vals = [mock_value() for i in range(5)]
exp = tuple(v.transformed for v in vals)
l = util.TransformingList(t)
l.extend(vals)
@@ -64,7 +64,7 @@ class TestTransformingList(unittest.TestCase):
def testInsertPop(self):
t = mock_transform()
- a, b, c = (mock_value() for i in xrange(3))
+ a, b, c = (mock_value() for i in range(3))
l = util.TransformingList(t)
l.append(a)
l.append(b)
@@ -77,7 +77,7 @@ class TestTransformingList(unittest.TestCase):
def testContainment(self):
t = mock_transform()
- vals = [mock_value() for i in xrange(5)]
+ vals = [mock_value() for i in range(5)]
l = util.TransformingList(t)
member = lambda v: v in l
self.assertFalse(member(vals[0]))
diff --git a/mandrel/test/utils.py b/mandrel/test/utils.py
index df7dbc8..3688ce5 100644
--- a/mandrel/test/utils.py
+++ b/mandrel/test/utils.py
@@ -5,6 +5,12 @@ import tempfile
import mandrel
import unittest
+try:
+ # python 3 compatibility
+ from importlib import reload
+except ImportError:
+ pass
+
class TestCase(unittest.TestCase):
def assertIs(self, a, b):
# python 2.6/2.7 compatibility
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 3,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 7
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/ethanrowe/python-mandrel.git@2d1d0d4646145b7af1bc5658902d247fcf85cf6a#egg=mandrel
mock==5.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: python-mandrel
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- mock==5.2.0
- pyyaml==6.0.2
prefix: /opt/conda/envs/python-mandrel
| [
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testAttributeLookup",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testAttributePropertySet",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testAttributeSet",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testBasicAttributes",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testConfigurationSetGet",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testForgivingConfiguration",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testGetConfiguration",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testGetLogger",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testGetLoggerName",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testHotCopy",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testInstanceSetGet",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testInstanceVersusConfiguration",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testLoadConfiguration",
"mandrel/test/config/configuration_class_test.py::TestConfigurationClass::testUnknownConfigurationExceptionHandling",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testDefaultLoadersList",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testFindConfigurationFileWithMatch",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testFindConfigurationFiles",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testFindConfigurationFilesWithoutMatch",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testGetConfiguration",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testGetLoader",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testGetPossibleBasenames",
"mandrel/test/config/loader_functionality_test.py::TestConfigLoaderFunctionality::testLoadConfigurationFile",
"mandrel/test/runner_test.py::TestRunner::testAppendModulePath",
"mandrel/test/runner_test.py::TestRunner::testAppendSearchPath",
"mandrel/test/runner_test.py::TestRunner::testCallableRunner",
"mandrel/test/runner_test.py::TestRunner::testImporting",
"mandrel/test/runner_test.py::TestRunner::testLaunch",
"mandrel/test/runner_test.py::TestRunner::testLaunchFunctions",
"mandrel/test/runner_test.py::TestRunner::testLogConfigBasenamePath",
"mandrel/test/runner_test.py::TestRunner::testNotImplemented",
"mandrel/test/runner_test.py::TestRunner::testOrderOfOperations",
"mandrel/test/runner_test.py::TestRunner::testPrependModulePath",
"mandrel/test/runner_test.py::TestRunner::testPrependSearchPath",
"mandrel/test/runner_test.py::TestRunner::testScriptRunner",
"mandrel/test/runner_test.py::TestRunner::testSearchPathSet",
"mandrel/test/util/file_finder_test.py::TestFileFinder::testMultiFind",
"mandrel/test/util/file_finder_test.py::TestFileFinder::testSingleFindMultiMatch",
"mandrel/test/util/file_finder_test.py::TestFileFinder::testSingleFindOneMatch",
"mandrel/test/util/file_finder_test.py::TestFileFinder::testSingleFindTwoMatch",
"mandrel/test/util/loaders_test.py::TestLoaderFunctions::test_convention_loader",
"mandrel/test/util/loaders_test.py::TestLoaderFunctions::test_convention_loader_pattern_violations",
"mandrel/test/util/loaders_test.py::TestLoaderFunctions::test_harness_loader_straight",
"mandrel/test/util/transforming_list_test.py::TestTransformingList::testAccessBasics",
"mandrel/test/util/transforming_list_test.py::TestTransformingList::testAppendBasics",
"mandrel/test/util/transforming_list_test.py::TestTransformingList::testContainment",
"mandrel/test/util/transforming_list_test.py::TestTransformingList::testExtend",
"mandrel/test/util/transforming_list_test.py::TestTransformingList::testInsertPop"
]
| [
"mandrel/test/util/loaders_test.py::TestLoaderFunctions::test_harness_loader_decorator"
]
| []
| []
| MIT License | 1,466 | [
"setup.py",
"mandrel/config/core.py",
"changelog.md",
".travis.yml",
"mandrel/runner.py",
"tox.ini",
"mandrel/bootstrap.py",
"mandrel/util.py"
]
| [
"setup.py",
"mandrel/config/core.py",
"changelog.md",
".travis.yml",
"mandrel/runner.py",
"tox.ini",
"mandrel/bootstrap.py",
"mandrel/util.py"
]
|
|
tox-dev__tox-554 | 682b96094b971b294c931c7464fbafe846308d4d | 2017-07-15 13:23:25 | e374ce61bf101fb2cc2eddd955f57048df153017 | diff --git a/tox/_quickstart.py b/tox/_quickstart.py
index 37c48ddc..bc283a8a 100644
--- a/tox/_quickstart.py
+++ b/tox/_quickstart.py
@@ -40,6 +40,7 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
+import argparse
import sys
from os import path
from codecs import open
@@ -224,19 +225,24 @@ def generate(d, overwrite=True, silent=False):
def write_file(fpath, mode, content):
print('Creating file %s.' % fpath)
- f = open(fpath, mode, encoding='utf-8')
try:
- f.write(content)
- finally:
- f.close()
+ with open(fpath, mode, encoding='utf-8') as f:
+ f.write(content)
+ except IOError:
+ print('Error writing file.')
+ raise
sys.stdout.write('\n')
- fpath = 'tox.ini'
+ fpath = path.join(d.get('path', ''), 'tox.ini')
if path.isfile(fpath) and not overwrite:
print('File %s already exists.' % fpath)
- do_prompt(d, 'fpath', 'Alternative path to write tox.ini contents to', 'tox-generated.ini')
+ do_prompt(
+ d,
+ 'fpath',
+ 'Alternative path to write tox.ini contents to',
+ path.join(d.get('path', ''), 'tox-generated.ini'))
fpath = d['fpath']
write_file(fpath, 'w', conf_text)
@@ -251,14 +257,25 @@ Execute `tox` to test your project.
''')
+def parse_args(argv):
+ parser = argparse.ArgumentParser(
+ description='Command-line script to quickly setup tox.ini for a Python project.'
+ )
+ parser.add_argument(
+ 'root', type=str, nargs='?', default='.',
+ help='Custom root directory to write tox.ini to. Defaults to current directory.'
+ )
+ parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
+
+ args = argv[1:]
+ return parser.parse_args(args)
+
+
def main(argv=sys.argv):
- d = {}
+ args = parse_args(argv)
- if len(argv) > 3:
- print('Usage: tox-quickstart [root]')
- sys.exit(1)
- elif len(argv) == 2:
- d['path'] = argv[1]
+ d = {}
+ d['path'] = args.root
try:
ask_user(d)
@@ -268,8 +285,13 @@ def main(argv=sys.argv):
return
d = process_input(d)
- generate(d, overwrite=False)
+ try:
+ generate(d, overwrite=False)
+ except Exception:
+ return 2
+
+ return 0
if __name__ == '__main__':
- main()
+ sys.exit(main())
| tox-quickstart should have a --help
- Bitbucket: https://bitbucket.org/hpk42/tox/issue/315
- Originally reported by: @warsaw
- Originally created at: 2016-02-16T16:16:47.537
and probably a --version too.
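For illustration, the patch above addresses both by moving argument handling onto the standard-library `argparse` module, which generates `--help` automatically and supports `--version` through its built-in `version` action. A minimal standalone sketch of that pattern (the description, default root, and version string below are placeholders, not tox's actual values):
```python
import argparse

def parse_args(argv):
    # --help is generated automatically by argparse; --version needs one extra argument.
    parser = argparse.ArgumentParser(
        description='Quickly set up a tox.ini for a Python project.',
    )
    parser.add_argument(
        'root', nargs='?', default='.',
        help='directory to write tox.ini to (defaults to the current directory)',
    )
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    return parser.parse_args(argv[1:])
```
With that in place, `tox-quickstart --help` prints the generated usage text, and `tox-quickstart --version` prints the program name plus the version string and exits.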
| tox-dev/tox | diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 49b2b0c0..76551008 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -379,6 +379,106 @@ deps =
result = read_tox('tox-generated.ini')
assert(result == expected_tox_ini)
+ def test_quickstart_main_tox_ini_location_can_be_overridden(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ ]
+ )
+ )
+
+ root_dir = tmpdir.mkdir('alt-root')
+ tox_ini_path = root_dir.join('tox.ini')
+
+ tox._quickstart.main(argv=['tox-quickstart', root_dir.basename])
+
+ assert tox_ini_path.isfile()
+
+ expected_tox_ini = """
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test
+deps =
+ pytest
+""".lstrip()
+ result = read_tox(fname=tox_ini_path.strpath)
+ assert(result == expected_tox_ini)
+
+ def test_quickstart_main_custom_tox_ini_location_with_existing_tox_ini(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ '', # tox.ini already exists; overwrite?
+ ]
+ )
+ )
+
+ root_dir = tmpdir.mkdir('alt-root')
+ tox_ini_path = root_dir.join('tox.ini')
+ tox_ini_path.write('foo\nbar\n')
+
+ tox._quickstart.main(argv=['tox-quickstart', root_dir.basename])
+ tox_ini_path = root_dir.join('tox-generated.ini')
+
+ assert tox_ini_path.isfile()
+
+ expected_tox_ini = """
+# Tox (https://tox.readthedocs.io/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = py27
+
+[testenv]
+commands = py.test
+deps =
+ pytest
+""".lstrip()
+ result = read_tox(fname=tox_ini_path.strpath)
+ assert(result == expected_tox_ini)
+
+ def test_quickstart_main_custom_nonexistent_tox_ini_location(
+ self,
+ tmpdir,
+ monkeypatch):
+ monkeypatch.setattr(
+ tox._quickstart, 'term_input',
+ self.get_mock_term_input(
+ [
+ '1', # py27 and py33
+ 'py.test', # command to run tests
+ '', # test dependencies
+ ]
+ )
+ )
+
+ root_dir = tmpdir.join('nonexistent-root')
+
+ assert tox._quickstart.main(argv=['tox-quickstart', root_dir.basename]) == 2
+
class TestToxQuickstart(object):
def test_pytest(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-timeout"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-timeout==2.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tox-dev/tox.git@682b96094b971b294c931c7464fbafe846308d4d#egg=tox
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- platformdirs==2.4.0
- pytest-timeout==2.1.0
- virtualenv==20.17.1
prefix: /opt/conda/envs/tox
| [
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_tox_ini_location_can_be_overridden",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_custom_tox_ini_location_with_existing_tox_ini",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_custom_nonexistent_tox_ini_location"
]
| []
| [
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_pytest",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_nose_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_trial_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_py27_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_py27_and_py33_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_all_pythons_and_pytest_adds_deps",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_choose_individual_pythons_and_defaults",
"tests/test_quickstart.py::TestToxQuickstartMain::test_quickstart_main_existing_tox_ini",
"tests/test_quickstart.py::TestToxQuickstart::test_pytest",
"tests/test_quickstart.py::TestToxQuickstart::test_setup_py_test",
"tests/test_quickstart.py::TestToxQuickstart::test_trial",
"tests/test_quickstart.py::TestToxQuickstart::test_nosetests"
]
| []
| MIT License | 1,467 | [
"tox/_quickstart.py"
]
| [
"tox/_quickstart.py"
]
|
|
asottile__add-trailing-comma-18 | f1666043a4ef3aabec4021acd8946b36209d546e | 2017-07-15 19:42:04 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 0f86212..2884adf 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -271,7 +271,7 @@ def _fix_brace(fix_data, add_comma, tokens):
indents = []
insert_indents = []
for i in range(first_brace + 3, last_brace):
- if tokens[i - 1].name == 'NL':
+ if tokens[i - 1].name == 'NL' and tokens[i].name != 'NL':
if tokens[i].name != UNIMPORTANT_WS:
min_indent = 0
insert_indents.append(i)
| Blank lines may be considered as "minimum" indentation while unhugging
They should be ignored; currently this introduces trailing whitespace:
```python
x('foo', (
'bar',
'baz',
))
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index b8b6f73..1cd26c9 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -473,6 +473,22 @@ def test_noop_unhugs(src):
' "bar"\n'
')',
),
+ # Regression test for #17
+ (
+ 'x("foo", (\n'
+ ' "bar",\n'
+ '\n'
+ ' "baz",\n'
+ '))',
+
+ 'x(\n'
+ ' "foo", (\n'
+ ' "bar",\n'
+ '\n'
+ ' "baz",\n'
+ ' ),\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@f1666043a4ef3aabec4021acd8946b36209d546e#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\","
]
| []
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,468 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
jupyter__nbgrader-838 | 7736f44b18dd8841bf8f5bca56720b58d59baf43 | 2017-07-15 22:27:39 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/apps/fetchapp.py b/nbgrader/apps/fetchapp.py
index 2c923ff7..5cfc9f88 100644
--- a/nbgrader/apps/fetchapp.py
+++ b/nbgrader/apps/fetchapp.py
@@ -73,16 +73,26 @@ class FetchApp(NbGrader):
def start(self):
super(FetchApp, self).start()
- # set assignemnt and course
- if len(self.extra_args) == 1:
- self.coursedir.assignment_id = self.extra_args[0]
- elif len(self.extra_args) > 2:
- self.fail("Too many arguments")
- elif self.coursedir.assignment_id == "":
+ # set assignment and course
+ if len(self.extra_args) == 0 and self.coursedir.assignment_id == "":
self.fail("Must provide assignment name:\nnbgrader <command> ASSIGNMENT [ --course COURSE ]")
- fetch = ExchangeFetch(coursedir=self.coursedir, parent=self)
- try:
- fetch.start()
- except ExchangeError:
- self.fail("nbgrader fetch failed")
+ if self.coursedir.assignment_id != "":
+ fetch = ExchangeFetch(coursedir=self.coursedir, parent=self)
+ try:
+ fetch.start()
+ except ExchangeError:
+ self.fail("nbgrader fetch failed")
+ else:
+ failed = False
+
+ for arg in self.extra_args:
+ self.coursedir.assignment_id = arg
+ fetch = ExchangeFetch(coursedir=self.coursedir, parent=self)
+ try:
+ fetch.start()
+ except ExchangeError:
+ failed = True
+
+ if failed:
+ self.fail("nbgrader fetch failed")
diff --git a/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb b/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
index c41f430d..c3983f29 100644
--- a/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
+++ b/nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb
@@ -397,7 +397,7 @@
"cell_type": "raw",
"metadata": {},
"source": [
- "However, this doesn't actually mean that it is truly read-only when opened in the notebook. Instead, what it means is that during the ``nbgrader assign`` step (see :ref:`assign-and-release-an-assignment`), the source of these cells will be recorded into the database. Then, during the ``nbgrader autograde`` step (see :ref:`autograde-assignments`), nbgrader will check whether the source of the student's version of the cell has changed. If it has, it will replace the cell's source with the version in the database, thus effectively overwriting any changes the student made.\n",
+ "However, this doesn't actually mean that it is truly read-only when opened in the notebook. Instead, what is means is that during the ``nbgrader assign`` step (see :ref:`assign-and-release-an-assignment`), the source of these cells will be recorded into the database. Then, during the ``nbgrader autograde`` step (see :ref:`autograde-assignments`), nbgrader will check whether the source of the student's version of the cell has changed. If it has, it will replace the cell's source with the version in the database, thus effectively overwriting any changes the student made.\n",
"\n",
".. versionadded:: 0.4.0\n",
" Read-only cells (and test cells) are now truly read-only! However, at the moment this functionality will only work on the master version of the notebook (5.0.0.dev)."
diff --git a/nbgrader/docs/source/user_guide/faq.rst b/nbgrader/docs/source/user_guide/faq.rst
index 719a5900..a51d0335 100644
--- a/nbgrader/docs/source/user_guide/faq.rst
+++ b/nbgrader/docs/source/user_guide/faq.rst
@@ -167,15 +167,3 @@ newer version, which you run through the autograder and which attains full
credit. Since the manual grade always takes precedence over the autograde, the
student would still receive the low score unless you updated your grade: hence
the motivation for marking it as needing to be manually graded (again).
-
-Do students have to install anything on their own computers to use nbgrader?
-----------------------------------------------------------------------------
-No, nbgrader only needs to be installed for the instructor. However, students
-may optionally install the Validate extension to verify that their submission
-passes all the test cases.
-
-Can tests be only temporarily hidden, so that students can reveal them?
------------------------------------------------------------------------
-No, the tests are either present in the student version of the notebook or they
-are not. However, there exist extensions such as
-https://github.com/kirbs-/hide_code which can assist in hiding code cells.
diff --git a/nbgrader/docs/source/user_guide/installation.rst b/nbgrader/docs/source/user_guide/installation.rst
index 401ca3e3..08ceab3e 100644
--- a/nbgrader/docs/source/user_guide/installation.rst
+++ b/nbgrader/docs/source/user_guide/installation.rst
@@ -11,7 +11,6 @@ system and command line tools::
Or, if you use `Anaconda <https://www.continuum.io/downloads>`__::
- conda install jupyter
conda install -c conda-forge nbgrader
nbgrader extensions
diff --git a/nbgrader/docs/source/user_guide/philosophy.rst b/nbgrader/docs/source/user_guide/philosophy.rst
index e495fa01..88bf65c4 100644
--- a/nbgrader/docs/source/user_guide/philosophy.rst
+++ b/nbgrader/docs/source/user_guide/philosophy.rst
@@ -66,42 +66,6 @@ the autograded version to:
where ``student_id`` and ``notebook_id`` were parsed from the input file
path.
-Here is how a sample directory structure for the course named
-``course101`` might look, where the users ``bitdiddle`` and ``hacker``
-have submitted solutions to the assignment ``ps1``:
-
-::
-
- course101/
- ├── gradebook.db
- ├── nbgrader_config.py
- ├── source
- │ ├── header.ipynb
- │ └── ps1
- │ ├── jupyter.png
- │ ├── problem1.ipynb
- │ └── problem2.ipynb
- ├── release
- │ └── ps1
- │ ├── jupyter.png
- │ ├── problem1.ipynb
- │ └── problem2.ipynb
- ├── submitted
- │ ├── bitdiddle
- │ │ └── ps1
- │ │ ├── jupyter.png
- │ │ ├── problem1.html
- │ │ ├── problem2.html
- │ │ └── timestamp.txt
- │ └── hacker
- │ └── ps1
- │ ├── jupyter.png
- │ ├── problem1.html
- │ ├── problem2.html
- │ └── timestamp.txt
- ├── autograded/
- └── feedback/
-
Database of assignments
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/nbgrader/server_extensions/formgrader/static/css/nbgrader.css b/nbgrader/server_extensions/formgrader/static/css/nbgrader.css
index 497053c4..61cc0d77 100644
--- a/nbgrader/server_extensions/formgrader/static/css/nbgrader.css
+++ b/nbgrader/server_extensions/formgrader/static/css/nbgrader.css
@@ -44,8 +44,3 @@ div.modal div.panel pre {
div.modal p {
margin-bottom: 20px;
}
-
-.jupyter-logo {
- height: 100px;
- width: 100px;
-}
diff --git a/nbgrader/server_extensions/formgrader/templates/base.tpl b/nbgrader/server_extensions/formgrader/templates/base.tpl
index b2c6168f..93866c2e 100644
--- a/nbgrader/server_extensions/formgrader/templates/base.tpl
+++ b/nbgrader/server_extensions/formgrader/templates/base.tpl
@@ -31,7 +31,7 @@
<h1>nbgrader</h1>
</div>
</div>
- <div class="col-md-8">
+ <div class="col-md-10">
<div class="page-header">
<h1>
{%- block title -%}
@@ -39,37 +39,6 @@
</h1>
</div>
</div>
- <div class="col-md-2">
- <div class="pull-right jupyter-logo">
- <svg viewBox="0 0 440 440" xmlns="http://www.w3.org/2000/svg" style="width:100%;height:100%">
- <g fill="#414042">
- <path d="M60.9 232c0 12.7-1 16.9-3.6 20-2.3 2.5-6.1 3.7-10.5 4.1l1 7.6c5.2 0 12.2-1.8 16.5-6 4.7-4.8 6.4-11.5 6.4-21.9v-48.2H61V232zM133.1 226.2c0 5.5.1 10.3.4 14.5h-8.6l-.5-8.7h-.2c-2.5 4.3-8.1 9.9-17.6 9.9-8.3 0-18.3-4.6-18.3-23.3v-31.1H98V217c0 10.1 3.1 16.9 11.9 16.9 6.5 0 11-4.5 12.7-8.8.5-1.4.9-3.2.9-4.9v-32.6h9.7v38.6zM151.3 204.9c0-6.8-.2-12.3-.4-17.3h8.7l.4 9.1h.2c4-6.5 10.2-10.3 18.9-10.3 12.8 0 22.5 10.9 22.5 27 0 19.1-11.6 28.5-24.2 28.5-7 0-13.2-3.1-16.4-8.3h-.2v28.9h-9.6v-57.6zm9.5 14.2c0 1.4.2 2.7.4 4 1.8 6.7 7.6 11.3 14.5 11.3 10.2 0 16.1-8.3 16.1-20.5 0-10.7-5.6-19.8-15.8-19.8-6.6 0-12.7 4.7-14.6 12-.3 1.2-.7 2.6-.7 4v9zM218.7 187.6l11.6 31.4c1.2 3.5 2.5 7.7 3.4 10.9h.2c1-3.2 2.1-7.2 3.4-11.1l10.5-31.2H258l-14.5 37.9c-6.9 18.2-11.6 27.6-18.2 33.3-4.7 4.2-9.4 5.8-11.9 6.3L211 257c2.4-.8 5.6-2.3 8.5-4.7 2.6-2.1 5.9-5.8 8.1-10.8.4-1 .8-1.8.8-2.3 0-.5-.2-1.3-.7-2.5l-19.7-49h10.7zM283.5 172.3v15.3h13.8v7.4h-13.8v28.7c0 6.6 1.9 10.3 7.2 10.3 2.5 0 4.4-.3 5.6-.7l.4 7.2c-1.9.8-4.8 1.3-8.6 1.3-4.5 0-8.1-1.4-10.4-4.1-2.7-2.9-3.7-7.6-3.7-13.8v-29h-8.2v-7.4h8.2v-12.7l9.5-2.5zM315.2 215.9c.2 13.1 8.6 18.4 18.2 18.4 6.9 0 11.1-1.2 14.7-2.7l1.6 6.9c-3.4 1.5-9.2 3.3-17.7 3.3-16.4 0-26.1-10.8-26.1-26.8s9.4-28.7 24.9-28.7c17.3 0 22 15.3 22 25 0 2-.2 3.5-.3 4.5h-37.3zm28.3-6.9c.1-6.1-2.5-15.7-13.4-15.7-9.8 0-14.1 9-14.8 15.7h28.2zM367 204.1c0-6.3-.1-11.6-.4-16.6h8.5l.3 10.4h.4c2.4-7.1 8.2-11.6 14.7-11.6 1.1 0 1.9.1 2.7.3v9.1c-1-.2-2-.3-3.3-.3-6.8 0-11.6 5.2-13 12.4-.2 1.3-.4 2.9-.4 4.5v28.3H367v-36.5z"/>
- </g>
- <circle cx="329.8" cy="40.6" fill="#6D6E71" r="21.4"/>
- <linearGradient gradientUnits="userSpaceOnUse" id="a" x1="67.752" x2="372.271" y1="321.544" y2="321.544">
- <stop offset=".052" stop-color="#F78D26"/>
- <stop offset=".206" stop-color="#F68826"/>
- <stop offset=".432" stop-color="#F37A25"/>
- <stop offset=".477" stop-color="#F37625"/>
- <stop offset=".616" stop-color="#E76623"/>
- <stop offset=".836" stop-color="#DC5221"/>
- <stop offset=".987" stop-color="#D84B21"/>
- </linearGradient>
- <path d="M220 326.4c-65.5 0-122.6-23.5-152.3-58.3C90.2 330.4 149.9 375 220 375s129.8-44.6 152.3-106.9c-29.7 34.8-86.8 58.3-152.3 58.3z" fill="url(#a)"/>
- <linearGradient gradientUnits="userSpaceOnUse" id="b" x1="67.752" x2="372.271" y1="104.869" y2="104.869">
- <stop offset=".052" stop-color="#F78D26"/>
- <stop offset=".206" stop-color="#F68826"/>
- <stop offset=".432" stop-color="#F37A25"/>
- <stop offset=".477" stop-color="#F37625"/>
- <stop offset=".616" stop-color="#E76623"/>
- <stop offset=".836" stop-color="#DC5221"/><stop offset=".987" stop-color="#D84B21"/>
- </linearGradient>
- <path d="M220 100c65.5 0 122.6 23.5 152.3 58.3C349.8 96 290.1 51.4 220 51.4S90.2 96 67.8 158.3C97.5 123.6 154.5 100 220 100z" fill="url(#b)"/>
- <circle cx="110.5" cy="394.4" fill="#939598" r="25.8"/>
- <circle cx="85.1" cy="70" fill="#58595B" r="15.7"/>
- </svg>
- </div>
- </div>
</div>
<div class="row">
<div class="col-md-2">
| Add the ability to fetch multiple assignments with one command
As suggested by @mikebolt in #433, adding the ability to fetch multiple assignments with one command would be a nice future enhancement.
> It would also be nice to be able to fetch multiple assignments with one command:
>
> nbgrader fetch --course data301a exampleA exampleB
> This isn't supported right now. Perhaps specifying --assignment exampleC would just add to the set of assignments to fetch, and you could also use commas with the assignment flag like this:
>
> nbgrader fetch --assignment=exampleA,exampleB,exampleC --course data301a
| jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_fetch.py b/nbgrader/tests/apps/test_nbgrader_fetch.py
index 044ebf25..c3e2b39e 100644
--- a/nbgrader/tests/apps/test_nbgrader_fetch.py
+++ b/nbgrader/tests/apps/test_nbgrader_fetch.py
@@ -10,7 +10,7 @@ from .conftest import notwindows
class TestNbGraderFetch(BaseTestApp):
def _release(self, assignment, exchange, course_dir, course="abc101"):
- self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", assignment, "p1.ipynb"))
run_nbgrader([
"release", assignment,
"--course", course,
@@ -29,6 +29,19 @@ class TestNbGraderFetch(BaseTestApp):
run_nbgrader(cmd, retcode=retcode)
+ def _fetch_multi(self, assignments, exchange, flags=None, retcode=0, course="abc101"):
+ cmd = [
+ "fetch",
+ "--course", course,
+ "--Exchange.root={}".format(exchange)
+ ]
+ cmd.extend(assignments)
+
+ if flags is not None:
+ cmd.extend(flags)
+
+ run_nbgrader(cmd, retcode=retcode)
+
def test_help(self):
"""Does the help display without error?"""
run_nbgrader(["fetch", "--help-all"])
@@ -80,3 +93,11 @@ class TestNbGraderFetch(BaseTestApp):
self._release("ps1", exchange, course_dir, course="abc102")
self._fetch("ps1", exchange, course="abc102", flags=["--Exchange.path_includes_course=True"])
assert os.path.isfile(join("abc102", "ps1", "p1.ipynb"))
+
+ def test_fetch_multiple_assignments(self, exchange, course_dir):
+ self._release("ps1", exchange, course_dir, course="abc101")
+
+ self._release("ps2", exchange, course_dir, course="abc101")
+ self._fetch_multi(["ps1", "ps2"], exchange, course="abc101", flags=["--Exchange.path_includes_course=True"])
+ assert os.path.isfile(join("abc101", "ps1", "p1.ipynb"))
+ assert os.path.isfile(join("abc101", "ps2", "p1.ipynb"))
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 7
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@7736f44b18dd8841bf8f5bca56720b58d59baf43#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_fetch_multiple_assignments"
]
| []
| [
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_help",
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_no_course_id",
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_fetch",
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_fetch_with_assignment_flag",
"nbgrader/tests/apps/test_nbgrader_fetch.py::TestNbGraderFetch::test_fetch_multiple_courses"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,469 | [
"nbgrader/server_extensions/formgrader/templates/base.tpl",
"nbgrader/server_extensions/formgrader/static/css/nbgrader.css",
"nbgrader/docs/source/user_guide/philosophy.rst",
"nbgrader/docs/source/user_guide/installation.rst",
"nbgrader/apps/fetchapp.py",
"nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb",
"nbgrader/docs/source/user_guide/faq.rst"
]
| [
"nbgrader/server_extensions/formgrader/templates/base.tpl",
"nbgrader/server_extensions/formgrader/static/css/nbgrader.css",
"nbgrader/docs/source/user_guide/philosophy.rst",
"nbgrader/docs/source/user_guide/installation.rst",
"nbgrader/apps/fetchapp.py",
"nbgrader/docs/source/user_guide/creating_and_grading_assignments.ipynb",
"nbgrader/docs/source/user_guide/faq.rst"
]
|
|
workhorsy__py-cpuinfo-78 | 894ff3fdbf67953b089e93597b0c361e50968170 | 2017-07-16 00:51:05 | 894ff3fdbf67953b089e93597b0c361e50968170 | diff --git a/cpuinfo/cpuinfo.py b/cpuinfo/cpuinfo.py
index d682a64..f386536 100644
--- a/cpuinfo/cpuinfo.py
+++ b/cpuinfo/cpuinfo.py
@@ -430,7 +430,6 @@ def _parse_dmesg_output(output):
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
#print('fields: ', fields)
-
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
@@ -443,7 +442,6 @@ def _parse_dmesg_output(output):
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
-
#print('FIELDS: ', (vendor_id, stepping, model, family))
# Features
@@ -1076,7 +1074,6 @@ def actual_get_cpu_info_from_cpuid():
# Get the Hz and scale
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
-
info = {
'vendor_id' : cpuid.get_vendor_id(),
'hardware' : '',
@@ -1171,7 +1168,7 @@ def _get_cpu_info_from_proc_cpuinfo():
'hardware' : hardware,
'brand' : processor_brand,
- 'l2_cache_size' : cache_size,
+ 'l3_cache_size' : cache_size,
'flags' : flags,
'vendor_id' : vendor_id,
'stepping' : stepping,
@@ -1288,6 +1285,22 @@ def _get_cpu_info_from_lscpu():
if model and model.isdigit():
info['model'] = int(model)
+ l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
+ if l1_data_cache_size:
+ info['l1_data_cache_size'] = l1_data_cache_size
+
+ l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
+ if l1_instruction_cache_size:
+ info['l1_instruction_cache_size'] = l1_instruction_cache_size
+
+ l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
+ if l2_cache_size:
+ info['l2_cache_size'] = l2_cache_size
+
+ l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
+ if l3_cache_size:
+ info['l3_cache_size'] = l3_cache_size
+
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
@@ -1732,7 +1745,8 @@ def CopyNewFields(info, new_info):
'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count',
'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size',
'l2_cache_associativity', 'stepping', 'model', 'family',
- 'processor_type', 'extended_model', 'extended_family', 'flags'
+ 'processor_type', 'extended_model', 'extended_family', 'flags',
+ 'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
@@ -1820,10 +1834,12 @@ def main():
print('Raw Arch String: {0}'.format(info.get('raw_arch_string', '')))
+ print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
+ print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
-
+ print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
| Get L1, L2, and L3 cache info from lscpu
| workhorsy/py-cpuinfo | diff --git a/tests/test_linux_aarch64_64.py b/tests/test_linux_aarch64_64.py
index 44b6c66..c0fc4aa 100644
--- a/tests/test_linux_aarch64_64.py
+++ b/tests/test_linux_aarch64_64.py
@@ -117,7 +117,7 @@ class TestLinux_Aarch_64(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(0, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(1, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -126,12 +126,17 @@ class TestLinux_Aarch_64(unittest.TestCase):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(6, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(9, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
- self.assertEqual(0, len(info))
+ self.assertEqual('78K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('16384K', info['l2_cache_size'])
+
+ self.assertEqual(3, len(info))
def test_get_cpu_info_from_proc_cpuinfo(self):
info = cpuinfo._get_cpu_info_from_proc_cpuinfo()
@@ -160,10 +165,15 @@ class TestLinux_Aarch_64(unittest.TestCase):
self.assertEqual('aarch64', info['raw_arch_string'])
- self.assertEqual('', info['l2_cache_size'])
+ self.assertEqual('78K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('16384K', info['l2_cache_size'])
self.assertEqual(0, info['l2_cache_line_size'])
self.assertEqual(0, info['l2_cache_associativity'])
+ self.assertEqual('', info['l3_cache_size'])
+
self.assertEqual(0, info['stepping'])
self.assertEqual(0, info['model'])
self.assertEqual(0, info['family'])
diff --git a/tests/test_linux_debian_8_5_x86_64.py b/tests/test_linux_debian_8_5_x86_64.py
index 38c4624..87fe4be 100644
--- a/tests/test_linux_debian_8_5_x86_64.py
+++ b/tests/test_linux_debian_8_5_x86_64.py
@@ -452,7 +452,7 @@ class TestLinuxDebian_8_5_X86_64(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(9, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(13, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(11, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -461,7 +461,7 @@ class TestLinuxDebian_8_5_X86_64(unittest.TestCase):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(16, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(19, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
@@ -477,6 +477,12 @@ class TestLinuxDebian_8_5_X86_64(unittest.TestCase):
self.assertEqual(42, info['model'])
self.assertEqual(6, info['family'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072K', info['l3_cache_size'])
+
def test_get_cpu_info_from_dmesg(self):
info = cpuinfo._get_cpu_info_from_dmesg()
@@ -500,7 +506,7 @@ class TestLinuxDebian_8_5_X86_64(unittest.TestCase):
self.assertEqual((2800000000, 0), info['hz_advertised_raw'])
self.assertEqual((2793652000, 0), info['hz_actual_raw'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
@@ -531,7 +537,11 @@ class TestLinuxDebian_8_5_X86_64(unittest.TestCase):
self.assertEqual('x86_64', info['raw_arch_string'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
diff --git a/tests/test_linux_debian_8_7_1_ppc64le.py b/tests/test_linux_debian_8_7_1_ppc64le.py
index 9e65d9d..a6abf3f 100644
--- a/tests/test_linux_debian_8_7_1_ppc64le.py
+++ b/tests/test_linux_debian_8_7_1_ppc64le.py
@@ -422,7 +422,7 @@ class TestLinuxDebian_8_7_1_ppc64le(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(0, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(2, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(5, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -431,11 +431,13 @@ class TestLinuxDebian_8_7_1_ppc64le(unittest.TestCase):
self.assertEqual(1, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(11, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(13, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
- self.assertEqual(0, len(info))
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+ self.assertEqual(2, len(info))
def test_get_cpu_info_from_ibm_pa_features(self):
info = cpuinfo._get_cpu_info_from_ibm_pa_features()
@@ -464,6 +466,8 @@ class TestLinuxDebian_8_7_1_ppc64le(unittest.TestCase):
self.assertEqual('PPC_64', info['arch'])
self.assertEqual(64, info['bits'])
self.assertEqual(2, info['count'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
self.assertEqual('ppc64le', info['raw_arch_string'])
self.assertEqual(
['dabr', 'dabrx', 'dsisr', 'fpu', 'lp', 'mmu', 'pp', 'rislb', 'run', 'slb', 'sprg3'],
diff --git a/tests/test_linux_debian_8_x86_64.py b/tests/test_linux_debian_8_x86_64.py
index 3307594..8e6f1c4 100644
--- a/tests/test_linux_debian_8_x86_64.py
+++ b/tests/test_linux_debian_8_x86_64.py
@@ -87,7 +87,7 @@ class TestLinuxDebian_8_X86_64(unittest.TestCase):
self.assertEqual((2930000000, 0), info['hz_advertised_raw'])
self.assertEqual((2928283000, 0), info['hz_actual_raw'])
- self.assertEqual('6144 KB', info['l2_cache_size'])
+ self.assertEqual('6144 KB', info['l3_cache_size'])
self.assertEqual(5, info['stepping'])
self.assertEqual(30, info['model'])
@@ -116,7 +116,7 @@ class TestLinuxDebian_8_X86_64(unittest.TestCase):
self.assertEqual('x86_64', info['raw_arch_string'])
- self.assertEqual('6144 KB', info['l2_cache_size'])
+ self.assertEqual('6144 KB', info['l3_cache_size'])
self.assertEqual(5, info['stepping'])
self.assertEqual(30, info['model'])
diff --git a/tests/test_linux_fedora_24_ppc64le.py b/tests/test_linux_fedora_24_ppc64le.py
index 8a0fc5a..b228387 100644
--- a/tests/test_linux_fedora_24_ppc64le.py
+++ b/tests/test_linux_fedora_24_ppc64le.py
@@ -355,7 +355,7 @@ class TestLinuxFedora_24_ppc64le(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(1, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(5, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -364,11 +364,14 @@ class TestLinuxFedora_24_ppc64le(unittest.TestCase):
self.assertEqual(1, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(11, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(13, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('64K', info['l1_data_cache_size'])
+
self.assertEqual('POWER8E (raw), altivec supported', info['brand'])
def test_get_cpu_info_from_ibm_pa_features(self):
@@ -398,6 +401,8 @@ class TestLinuxFedora_24_ppc64le(unittest.TestCase):
self.assertEqual('PPC_64', info['arch'])
self.assertEqual(64, info['bits'])
self.assertEqual(2, info['count'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('64K', info['l1_data_cache_size'])
self.assertEqual('ppc64le', info['raw_arch_string'])
self.assertEqual(
['dss_2.02', 'dss_2.05', 'dss_2.06', 'fpu', 'lsd_in_dscr', 'ppr', 'slb', 'sso_2.06', 'ugr_in_dscr'],
diff --git a/tests/test_linux_fedora_24_x86_64.py b/tests/test_linux_fedora_24_x86_64.py
index 6c5add7..b745472 100644
--- a/tests/test_linux_fedora_24_x86_64.py
+++ b/tests/test_linux_fedora_24_x86_64.py
@@ -452,7 +452,7 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(9, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(13, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(11, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -461,7 +461,7 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(16, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(19, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
@@ -477,6 +477,12 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
self.assertEqual(42, info['model'])
self.assertEqual(6, info['family'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072K', info['l3_cache_size'])
+
def test_get_cpu_info_from_dmesg(self):
info = cpuinfo._get_cpu_info_from_dmesg()
@@ -490,6 +496,7 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
self.assertEqual(42, info['model'])
self.assertEqual(6, info['family'])
+
def test_get_cpu_info_from_proc_cpuinfo(self):
info = cpuinfo._get_cpu_info_from_proc_cpuinfo()
@@ -500,7 +507,7 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
self.assertEqual((2800000000, 0), info['hz_advertised_raw'])
self.assertEqual((2793652000, 0), info['hz_actual_raw'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
@@ -531,7 +538,11 @@ class TestLinuxFedora_24_X86_64(unittest.TestCase):
self.assertEqual('x86_64', info['raw_arch_string'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
diff --git a/tests/test_linux_gentoo_2_2_x86_64.py b/tests/test_linux_gentoo_2_2_x86_64.py
index ac18db4..6704573 100644
--- a/tests/test_linux_gentoo_2_2_x86_64.py
+++ b/tests/test_linux_gentoo_2_2_x86_64.py
@@ -455,7 +455,7 @@ class TestLinuxGentoo_2_2_X86_64(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(10, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(14, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(11, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -464,7 +464,7 @@ class TestLinuxGentoo_2_2_X86_64(unittest.TestCase):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(16, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(19, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
@@ -479,6 +479,12 @@ class TestLinuxGentoo_2_2_X86_64(unittest.TestCase):
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
self.assertEqual(6, info['family'])
+
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072K', info['l3_cache_size'])
+
self.assertEqual(
['apic', 'clflush', 'cmov', 'constant_tsc', 'cx16', 'cx8', 'de',
'fpu', 'fxsr', 'ht', 'hypervisor', 'lahf_lm', 'lm', 'mca', 'mce',
@@ -513,7 +519,7 @@ class TestLinuxGentoo_2_2_X86_64(unittest.TestCase):
self.assertEqual((2800000000, 0), info['hz_advertised_raw'])
self.assertEqual((2793652000, 0), info['hz_actual_raw'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
@@ -544,7 +550,10 @@ class TestLinuxGentoo_2_2_X86_64(unittest.TestCase):
self.assertEqual('x86_64', info['raw_arch_string'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
diff --git a/tests/test_linux_rhel_7_3_ppc64le.py b/tests/test_linux_rhel_7_3_ppc64le.py
index 05b4821..b215fa7 100644
--- a/tests/test_linux_rhel_7_3_ppc64le.py
+++ b/tests/test_linux_rhel_7_3_ppc64le.py
@@ -333,7 +333,7 @@ class TestLinuxRHEL_7_3_ppc64le(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(1, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(3, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(5, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -342,11 +342,12 @@ class TestLinuxRHEL_7_3_ppc64le(unittest.TestCase):
self.assertEqual(1, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(11, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(13, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
-
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('64K', info['l1_data_cache_size'])
self.assertEqual('POWER8E (raw), altivec supported', info['brand'])
def test_get_cpu_info_from_ibm_pa_features(self):
@@ -374,6 +375,8 @@ class TestLinuxRHEL_7_3_ppc64le(unittest.TestCase):
self.assertEqual((3425000000, 0), info['hz_advertised_raw'])
self.assertEqual((3425000000, 0), info['hz_actual_raw'])
self.assertEqual('PPC_64', info['arch'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('64K', info['l1_data_cache_size'])
self.assertEqual(64, info['bits'])
self.assertEqual(16, info['count'])
self.assertEqual('ppc64le', info['raw_arch_string'])
diff --git a/tests/test_linux_ubuntu_16_04_x86_64.py b/tests/test_linux_ubuntu_16_04_x86_64.py
index 3b4f146..1f66bf9 100644
--- a/tests/test_linux_ubuntu_16_04_x86_64.py
+++ b/tests/test_linux_ubuntu_16_04_x86_64.py
@@ -456,7 +456,7 @@ class TestLinuxUbuntu_16_04_X86_64(unittest.TestCase):
def test_returns(self):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
- self.assertEqual(10, len(cpuinfo._get_cpu_info_from_lscpu()))
+ self.assertEqual(14, len(cpuinfo._get_cpu_info_from_lscpu()))
self.assertEqual(11, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
@@ -465,7 +465,7 @@ class TestLinuxUbuntu_16_04_X86_64(unittest.TestCase):
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
- self.assertEqual(16, len(cpuinfo.get_cpu_info()))
+ self.assertEqual(19, len(cpuinfo.get_cpu_info()))
def test_get_cpu_info_from_lscpu(self):
info = cpuinfo._get_cpu_info_from_lscpu()
@@ -480,6 +480,11 @@ class TestLinuxUbuntu_16_04_X86_64(unittest.TestCase):
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
self.assertEqual(6, info['family'])
+
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+ self.assertEqual('256K', info['l2_cache_size'])
+ self.assertEqual('3072K', info['l3_cache_size'])
self.assertEqual(
['acpi', 'aperfmperf', 'apic', 'arat', 'arch_perfmon', 'bts',
'clflush', 'cmov', 'constant_tsc', 'cx16', 'cx8', 'de', 'ds_cpl',
@@ -519,7 +524,7 @@ class TestLinuxUbuntu_16_04_X86_64(unittest.TestCase):
self.assertEqual((2800000000, 0), info['hz_advertised_raw'])
self.assertEqual((1901375000, 0), info['hz_actual_raw'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
@@ -555,7 +560,12 @@ class TestLinuxUbuntu_16_04_X86_64(unittest.TestCase):
self.assertEqual('x86_64', info['raw_arch_string'])
- self.assertEqual('3072 KB', info['l2_cache_size'])
+ self.assertEqual('32K', info['l1_instruction_cache_size'])
+ self.assertEqual('32K', info['l1_data_cache_size'])
+
+ self.assertEqual('256K', info['l2_cache_size'])
+
+ self.assertEqual('3072 KB', info['l3_cache_size'])
self.assertEqual(7, info['stepping'])
self.assertEqual(42, info['model'])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/workhorsy/py-cpuinfo.git@894ff3fdbf67953b089e93597b0c361e50968170#egg=py_cpuinfo
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: py-cpuinfo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/py-cpuinfo
| [
"tests/test_linux_aarch64_64.py::TestLinux_Aarch_64::test_get_cpu_info_from_lscpu",
"tests/test_linux_aarch64_64.py::TestLinux_Aarch_64::test_returns",
"tests/test_linux_debian_8_5_x86_64.py::TestLinuxDebian_8_5_X86_64::test_all",
"tests/test_linux_debian_8_5_x86_64.py::TestLinuxDebian_8_5_X86_64::test_get_cpu_info_from_lscpu",
"tests/test_linux_debian_8_5_x86_64.py::TestLinuxDebian_8_5_X86_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_debian_8_5_x86_64.py::TestLinuxDebian_8_5_X86_64::test_returns",
"tests/test_linux_debian_8_7_1_ppc64le.py::TestLinuxDebian_8_7_1_ppc64le::test_all",
"tests/test_linux_debian_8_7_1_ppc64le.py::TestLinuxDebian_8_7_1_ppc64le::test_get_cpu_info_from_lscpu",
"tests/test_linux_debian_8_7_1_ppc64le.py::TestLinuxDebian_8_7_1_ppc64le::test_returns",
"tests/test_linux_debian_8_x86_64.py::TestLinuxDebian_8_X86_64::test_all",
"tests/test_linux_debian_8_x86_64.py::TestLinuxDebian_8_X86_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_fedora_24_ppc64le.py::TestLinuxFedora_24_ppc64le::test_all",
"tests/test_linux_fedora_24_ppc64le.py::TestLinuxFedora_24_ppc64le::test_get_cpu_info_from_lscpu",
"tests/test_linux_fedora_24_ppc64le.py::TestLinuxFedora_24_ppc64le::test_returns",
"tests/test_linux_fedora_24_x86_64.py::TestLinuxFedora_24_X86_64::test_all",
"tests/test_linux_fedora_24_x86_64.py::TestLinuxFedora_24_X86_64::test_get_cpu_info_from_lscpu",
"tests/test_linux_fedora_24_x86_64.py::TestLinuxFedora_24_X86_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_fedora_24_x86_64.py::TestLinuxFedora_24_X86_64::test_returns",
"tests/test_linux_gentoo_2_2_x86_64.py::TestLinuxGentoo_2_2_X86_64::test_all",
"tests/test_linux_gentoo_2_2_x86_64.py::TestLinuxGentoo_2_2_X86_64::test_get_cpu_info_from_lscpu",
"tests/test_linux_gentoo_2_2_x86_64.py::TestLinuxGentoo_2_2_X86_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_gentoo_2_2_x86_64.py::TestLinuxGentoo_2_2_X86_64::test_returns",
"tests/test_linux_rhel_7_3_ppc64le.py::TestLinuxRHEL_7_3_ppc64le::test_all",
"tests/test_linux_rhel_7_3_ppc64le.py::TestLinuxRHEL_7_3_ppc64le::test_get_cpu_info_from_lscpu",
"tests/test_linux_rhel_7_3_ppc64le.py::TestLinuxRHEL_7_3_ppc64le::test_returns",
"tests/test_linux_ubuntu_16_04_x86_64.py::TestLinuxUbuntu_16_04_X86_64::test_all",
"tests/test_linux_ubuntu_16_04_x86_64.py::TestLinuxUbuntu_16_04_X86_64::test_get_cpu_info_from_lscpu",
"tests/test_linux_ubuntu_16_04_x86_64.py::TestLinuxUbuntu_16_04_X86_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_ubuntu_16_04_x86_64.py::TestLinuxUbuntu_16_04_X86_64::test_returns"
]
| []
| [
"tests/test_linux_aarch64_64.py::TestLinux_Aarch_64::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_debian_8_5_x86_64.py::TestLinuxDebian_8_5_X86_64::test_get_cpu_info_from_dmesg",
"tests/test_linux_debian_8_7_1_ppc64le.py::TestLinuxDebian_8_7_1_ppc64le::test_get_cpu_info_from_ibm_pa_features",
"tests/test_linux_debian_8_7_1_ppc64le.py::TestLinuxDebian_8_7_1_ppc64le::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_debian_8_x86_64.py::TestLinuxDebian_8_X86_64::test_returns",
"tests/test_linux_fedora_24_ppc64le.py::TestLinuxFedora_24_ppc64le::test_get_cpu_info_from_ibm_pa_features",
"tests/test_linux_fedora_24_ppc64le.py::TestLinuxFedora_24_ppc64le::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_fedora_24_x86_64.py::TestLinuxFedora_24_X86_64::test_get_cpu_info_from_dmesg",
"tests/test_linux_gentoo_2_2_x86_64.py::TestLinuxGentoo_2_2_X86_64::test_get_cpu_info_from_dmesg",
"tests/test_linux_rhel_7_3_ppc64le.py::TestLinuxRHEL_7_3_ppc64le::test_get_cpu_info_from_ibm_pa_features",
"tests/test_linux_rhel_7_3_ppc64le.py::TestLinuxRHEL_7_3_ppc64le::test_get_cpu_info_from_proc_cpuinfo",
"tests/test_linux_ubuntu_16_04_x86_64.py::TestLinuxUbuntu_16_04_X86_64::test_get_cpu_info_from_dmesg"
]
| []
| MIT License | 1,470 | [
"cpuinfo/cpuinfo.py"
]
| [
"cpuinfo/cpuinfo.py"
]
|
|
tox-dev__tox-557 | e374ce61bf101fb2cc2eddd955f57048df153017 | 2017-07-16 10:25:53 | e374ce61bf101fb2cc2eddd955f57048df153017 | diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index aa65cccd..84ccf207 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -48,3 +48,4 @@ Selim Belhaouane
Nick Douma
Cyril Roelandt
Bartolome Sanchez Salado
+Laszlo Vasko
diff --git a/doc/config.txt b/doc/config.txt
index 2a7a1465..8880123c 100644
--- a/doc/config.txt
+++ b/doc/config.txt
@@ -435,6 +435,14 @@ then the value will be retrieved as ``os.environ['KEY']``
and replace with and empty string if the environment variable does not
exist.
+Substitutions can also be nested. In that case they are expanded starting
+from the innermost expression::
+
+ {env:KEY:{env:DEFAULT_OF_KEY}}
+
+the above example is roughly equivalent to
+``os.environ.get('KEY', os.environ['DEFAULT_OF_KEY'])``
+
.. _`command positional substitution`:
.. _`positional substitution`:
diff --git a/tox/config.py b/tox/config.py
index 33bf14f2..d797951c 100755
--- a/tox/config.py
+++ b/tox/config.py
@@ -1059,8 +1059,20 @@ class Replacer:
self.reader = reader
self.crossonly = crossonly
- def do_replace(self, x):
- return self.RE_ITEM_REF.sub(self._replace_match, x)
+ def do_replace(self, value):
+ '''
+ Recursively expand substitutions starting from the innermost expression
+ '''
+ def substitute_once(x):
+ return self.RE_ITEM_REF.sub(self._replace_match, x)
+
+ expanded = substitute_once(value)
+
+ while expanded != value: # substitution found
+ value = expanded
+ expanded = substitute_once(value)
+
+ return expanded
def _replace_match(self, match):
g = match.groupdict()
| {env:} macro is not expanded in setenv if the default value contains {envdir}
- Bitbucket: https://bitbucket.org/hpk42/tox/issue/301
- Originally reported by: @booxter
- Originally created at: 2016-01-05T19:28:00.290
The following tox target will not expand the {env:} macro with the default value (the substitution for {envdir}):
```
[testenv:dsvm-functional]
setenv = OS_ROOTWRAP_CMD={env:OS_ROOTWRAP_CMD:{envdir}}
commands =
env
```
```
$ tox -e dsvm-functional
...
OS_ROOTWRAP_CMD={env:OS_ROOTWRAP_CMD:/home/vagrant/git/neutron-vpnaas/.tox/dsvm-functional}
...
```
Once I replace {envdir} with a hardcoded value, it expands {env:} correctly using the default value.
```
[testenv:dsvm-functional]
setenv = OS_ROOTWRAP_CMD={env:OS_ROOTWRAP_CMD:XXX}
commands =
env
```
```
$ tox -e dsvm-functional
...
OS_ROOTWRAP_CMD=XXX
...
``` | tox-dev/tox | diff --git a/tests/test_config.py b/tests/test_config.py
index 1f57dcec..9254e992 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1088,8 +1088,7 @@ class TestConfigTestEnv:
assert 'FOO' in env
assert 'BAR' in env
- @pytest.mark.xfail(raises=AssertionError, reason="issue #301")
- def test_substitution_env_defaults_issue301(tmpdir, newconfig, monkeypatch):
+ def test_substitution_nested_env_defaults_issue301(tmpdir, newconfig, monkeypatch):
monkeypatch.setenv("IGNORE_STATIC_DEFAULT", "env")
monkeypatch.setenv("IGNORE_DYNAMIC_DEFAULT", "env")
config = newconfig("""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-timeout",
"pytest-flakes",
"pytest-pep8"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
distlib==0.3.9
execnet==1.9.0
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pep8==1.7.1
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyflakes==3.0.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cache==1.0
pytest-flakes==4.0.5
pytest-pep8==1.0.6
pytest-timeout==2.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tox-dev/tox.git@e374ce61bf101fb2cc2eddd955f57048df153017#egg=tox
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
virtualenv==20.17.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tox
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- distlib==0.3.9
- execnet==1.9.0
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- pep8==1.7.1
- platformdirs==2.4.0
- pyflakes==3.0.1
- pytest-cache==1.0
- pytest-flakes==4.0.5
- pytest-pep8==1.0.6
- pytest-timeout==2.1.0
- virtualenv==20.17.1
prefix: /opt/conda/envs/tox
| [
"tests/test_config.py::TestConfigTestEnv::test_substitution_nested_env_defaults_issue301"
]
| [
"tests/test_config.py::TestVenvConfig::test_force_dep_with_url",
"tests/test_config.py::TestIniParser::test_getbool"
]
| [
"tests/test_config.py::TestVenvConfig::test_config_parsing_minimal",
"tests/test_config.py::TestVenvConfig::test_config_parsing_multienv",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually",
"tests/test_config.py::TestVenvConfig::test_envdir_set_manually_with_substitutions",
"tests/test_config.py::TestVenvConfig::test_force_dep_version",
"tests/test_config.py::TestVenvConfig::test_is_same_dep",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_rex",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[win]",
"tests/test_config.py::TestConfigPlatform::test_config_parse_platform_with_factors[lin]",
"tests/test_config.py::TestConfigPackage::test_defaults",
"tests/test_config.py::TestConfigPackage::test_defaults_distshare",
"tests/test_config.py::TestConfigPackage::test_defaults_changed_dir",
"tests/test_config.py::TestConfigPackage::test_project_paths",
"tests/test_config.py::TestParseconfig::test_search_parents",
"tests/test_config.py::TestParseconfig::test_explicit_config_path",
"tests/test_config.py::test_get_homedir",
"tests/test_config.py::TestGetcontextname::test_blank",
"tests/test_config.py::TestGetcontextname::test_jenkins",
"tests/test_config.py::TestGetcontextname::test_hudson_legacy",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_multiline",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_substitution_from_other_section_posargs",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_section_and_posargs_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution",
"tests/test_config.py::TestIniParserAgainstCommandsKey::test_command_env_substitution_global",
"tests/test_config.py::TestIniParser::test_getstring_single",
"tests/test_config.py::TestIniParser::test_missing_substitution",
"tests/test_config.py::TestIniParser::test_getstring_fallback_sections",
"tests/test_config.py::TestIniParser::test_getstring_substitution",
"tests/test_config.py::TestIniParser::test_getlist",
"tests/test_config.py::TestIniParser::test_getdict",
"tests/test_config.py::TestIniParser::test_getstring_environment_substitution",
"tests/test_config.py::TestIniParser::test_getstring_environment_substitution_with_default",
"tests/test_config.py::TestIniParser::test_value_matches_section_substituion",
"tests/test_config.py::TestIniParser::test_value_doesn_match_section_substitution",
"tests/test_config.py::TestIniParser::test_getstring_other_section_substitution",
"tests/test_config.py::TestIniParser::test_argvlist",
"tests/test_config.py::TestIniParser::test_argvlist_windows_escaping",
"tests/test_config.py::TestIniParser::test_argvlist_multiline",
"tests/test_config.py::TestIniParser::test_argvlist_quoting_in_command",
"tests/test_config.py::TestIniParser::test_argvlist_comment_after_command",
"tests/test_config.py::TestIniParser::test_argvlist_command_contains_hash",
"tests/test_config.py::TestIniParser::test_argvlist_positional_substitution",
"tests/test_config.py::TestIniParser::test_argvlist_quoted_posargs",
"tests/test_config.py::TestIniParser::test_argvlist_posargs_with_quotes",
"tests/test_config.py::TestIniParser::test_positional_arguments_are_only_replaced_when_standing_alone",
"tests/test_config.py::TestIniParser::test_posargs_are_added_escaped_issue310",
"tests/test_config.py::TestIniParser::test_substitution_with_multiple_words",
"tests/test_config.py::TestIniParser::test_getargv",
"tests/test_config.py::TestIniParser::test_getpath",
"tests/test_config.py::TestIniParserPrefix::test_basic_section_access",
"tests/test_config.py::TestIniParserPrefix::test_fallback_sections",
"tests/test_config.py::TestIniParserPrefix::test_value_matches_prefixed_section_substituion",
"tests/test_config.py::TestIniParserPrefix::test_value_doesn_match_prefixed_section_substitution",
"tests/test_config.py::TestIniParserPrefix::test_other_section_substitution",
"tests/test_config.py::TestConfigTestEnv::test_commentchars_issue33",
"tests/test_config.py::TestConfigTestEnv::test_defaults",
"tests/test_config.py::TestConfigTestEnv::test_sitepackages_switch",
"tests/test_config.py::TestConfigTestEnv::test_installpkg_tops_develop",
"tests/test_config.py::TestConfigTestEnv::test_specific_command_overrides",
"tests/test_config.py::TestConfigTestEnv::test_whitelist_externals",
"tests/test_config.py::TestConfigTestEnv::test_changedir",
"tests/test_config.py::TestConfigTestEnv::test_ignore_errors",
"tests/test_config.py::TestConfigTestEnv::test_envbindir",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[jython]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy]",
"tests/test_config.py::TestConfigTestEnv::test_envbindir_jython[pypy3]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[win32]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_multiline_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[win32]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_as_space_separated_list[linux2]",
"tests/test_config.py::TestConfigTestEnv::test_passenv_with_factor",
"tests/test_config.py::TestConfigTestEnv::test_passenv_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_passenv_glob_from_global_env",
"tests/test_config.py::TestConfigTestEnv::test_changedir_override",
"tests/test_config.py::TestConfigTestEnv::test_install_command_setting",
"tests/test_config.py::TestConfigTestEnv::test_install_command_must_contain_packages",
"tests/test_config.py::TestConfigTestEnv::test_install_command_substitutions",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre",
"tests/test_config.py::TestConfigTestEnv::test_pip_pre_cmdline_override",
"tests/test_config.py::TestConfigTestEnv::test_simple",
"tests/test_config.py::TestConfigTestEnv::test_substitution_error",
"tests/test_config.py::TestConfigTestEnv::test_substitution_defaults",
"tests/test_config.py::TestConfigTestEnv::test_substitution_notfound_issue246",
"tests/test_config.py::TestConfigTestEnv::test_substitution_positional",
"tests/test_config.py::TestConfigTestEnv::test_substitution_noargs_issue240",
"tests/test_config.py::TestConfigTestEnv::test_substitution_double",
"tests/test_config.py::TestConfigTestEnv::test_posargs_backslashed_or_quoted",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_posargs",
"tests/test_config.py::TestConfigTestEnv::test_rewrite_simple_posargs",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist0-deps0]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_testenv[envlist1-deps1]",
"tests/test_config.py::TestConfigTestEnv::test_take_dependencies_from_other_section",
"tests/test_config.py::TestConfigTestEnv::test_multilevel_substitution",
"tests/test_config.py::TestConfigTestEnv::test_recursive_substitution_cycle_fails",
"tests/test_config.py::TestConfigTestEnv::test_single_value_from_other_secton",
"tests/test_config.py::TestConfigTestEnv::test_factors",
"tests/test_config.py::TestConfigTestEnv::test_factor_ops",
"tests/test_config.py::TestConfigTestEnv::test_default_factors",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_boolean",
"tests/test_config.py::TestConfigTestEnv::test_factors_in_setenv",
"tests/test_config.py::TestConfigTestEnv::test_factor_use_not_checked",
"tests/test_config.py::TestConfigTestEnv::test_factors_groups_touch",
"tests/test_config.py::TestConfigTestEnv::test_period_in_factor",
"tests/test_config.py::TestConfigTestEnv::test_ignore_outcome",
"tests/test_config.py::TestGlobalOptions::test_notest",
"tests/test_config.py::TestGlobalOptions::test_verbosity",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_default",
"tests/test_config.py::TestGlobalOptions::test_substitution_jenkins_context",
"tests/test_config.py::TestGlobalOptions::test_sdist_specification",
"tests/test_config.py::TestGlobalOptions::test_env_selection",
"tests/test_config.py::TestGlobalOptions::test_py_venv",
"tests/test_config.py::TestGlobalOptions::test_default_environments",
"tests/test_config.py::TestGlobalOptions::test_envlist_expansion",
"tests/test_config.py::TestGlobalOptions::test_envlist_cross_product",
"tests/test_config.py::TestGlobalOptions::test_envlist_multiline",
"tests/test_config.py::TestGlobalOptions::test_minversion",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_true",
"tests/test_config.py::TestGlobalOptions::test_skip_missing_interpreters_false",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_commandline",
"tests/test_config.py::TestGlobalOptions::test_defaultenv_partial_override",
"tests/test_config.py::TestHashseedOption::test_default",
"tests/test_config.py::TestHashseedOption::test_passing_integer",
"tests/test_config.py::TestHashseedOption::test_passing_string",
"tests/test_config.py::TestHashseedOption::test_passing_empty_string",
"tests/test_config.py::TestHashseedOption::test_setenv",
"tests/test_config.py::TestHashseedOption::test_noset",
"tests/test_config.py::TestHashseedOption::test_noset_with_setenv",
"tests/test_config.py::TestHashseedOption::test_one_random_hashseed",
"tests/test_config.py::TestHashseedOption::test_setenv_in_one_testenv",
"tests/test_config.py::TestSetenv::test_getdict_lazy",
"tests/test_config.py::TestSetenv::test_getdict_lazy_update",
"tests/test_config.py::TestSetenv::test_setenv_uses_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_default_os_environ",
"tests/test_config.py::TestSetenv::test_setenv_uses_other_setenv",
"tests/test_config.py::TestSetenv::test_setenv_recursive_direct",
"tests/test_config.py::TestSetenv::test_setenv_overrides",
"tests/test_config.py::TestSetenv::test_setenv_with_envdir_and_basepython",
"tests/test_config.py::TestSetenv::test_setenv_ordering_1",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_issue294",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_subst_twice",
"tests/test_config.py::TestSetenv::test_setenv_cross_section_mixed",
"tests/test_config.py::TestIndexServer::test_indexserver",
"tests/test_config.py::TestIndexServer::test_parse_indexserver",
"tests/test_config.py::TestIndexServer::test_multiple_homedir_relative_local_indexservers",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[:]",
"tests/test_config.py::TestConfigConstSubstitutions::test_replace_pathsep_unix[;]",
"tests/test_config.py::TestConfigConstSubstitutions::test_pathsep_regex",
"tests/test_config.py::TestParseEnv::test_parse_recreate",
"tests/test_config.py::TestCmdInvocation::test_help",
"tests/test_config.py::TestCmdInvocation::test_version",
"tests/test_config.py::TestCmdInvocation::test_listenvs",
"tests/test_config.py::TestCmdInvocation::test_listenvs_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description",
"tests/test_config.py::TestCmdInvocation::test_listenvs_all_verbose_description_no_additional_environments",
"tests/test_config.py::TestCmdInvocation::test_config_specific_ini",
"tests/test_config.py::TestCmdInvocation::test_no_tox_ini",
"tests/test_config.py::TestCmdInvocation::test_override_workdir",
"tests/test_config.py::TestCmdInvocation::test_showconfig_with_force_dep_version",
"tests/test_config.py::test_env_spec[-e",
"tests/test_config.py::TestCommandParser::test_command_parser_for_word",
"tests/test_config.py::TestCommandParser::test_command_parser_for_posargs",
"tests/test_config.py::TestCommandParser::test_command_parser_for_multiple_words",
"tests/test_config.py::TestCommandParser::test_command_parser_for_substitution_with_spaces",
"tests/test_config.py::TestCommandParser::test_command_parser_with_complex_word_set",
"tests/test_config.py::TestCommandParser::test_command_with_runs_of_whitespace",
"tests/test_config.py::TestCommandParser::test_command_with_split_line_in_subst_arguments",
"tests/test_config.py::TestCommandParser::test_command_parsing_for_issue_10"
]
| []
| MIT License | 1,471 | [
"doc/config.txt",
"CONTRIBUTORS",
"tox/config.py"
]
| [
"doc/config.txt",
"CONTRIBUTORS",
"tox/config.py"
]
|
|
hylang__hy-1325 | c3c7af2db3e6314fc9503ead68b677daf6a4f089 | 2017-07-16 17:44:51 | 5c720c0110908e3f47dba2e4cc1c820d16f359a1 | Kodiologist: Oh great, Travis is showing that it segfaults on 3.3 and 3.4 for some reason. Probably because PEP 448 only made it into 3.5. I'll gate those tests appropriately and see what happens.
Kodiologist: Nope. I guess this won't be so easy. There were probably changes to the desired AST.
But please do take a look at this before I start hacking away at that.
gilch: Having written three of those four issues, I approve of the additional syntax. However, I didn't say anything about removing `apply`. But now that I think about it, if it's made redundant then it should be removed. It would certainly be easy to re-implement in terms of `#*` and `#**`, but when would we even *need* to?
To answer my own question, perhaps in macros and higher-order functions? Would we ever need the old `apply` in `->` or `doto`? Is there a non-reader syntax for these special forms, like `quote` is for `'`? Sometimes it's easier if you don't need the reader syntax when writing macros, at least in the case of `quote`. Our current `apply` doesn't work as a HOF anyway, but maybe it should be shadowed. I might need to think about this some more.
One other issue I take with the proposed implementation is how you lex the tokens. A tag macro can be any symbol, yet the tests demonstrate `(setv [a #*b c] "ghijklmno")` would unpack `b`. But what happens if you have a tag macro with symbol `*b`? Would it apply to `c` or just be unusable?
I think whitespace should be required between a `#*` and a symbol. But it should be fine to put it directly before a list with no space `#*[...`, likewise, a `#**` would work on a dict display `#**{...`, but not before a symbol.
Kodiologist: > Is there a non-reader syntax for these special forms, like `quote` is for `'`?
Yes, `#* foo` is just shorthand for `(unpack-iterable foo)`, and `#** foo` for `(unpack-mapping foo)`.
> A tag macro can be any symbol
Not if you want to be able to call it. Here are two examples:
=> (deftag "{b" [x] "hello")
<function <lambda> at 0x7f579c0d2d08>
=> (setv b "foo")
=> (print #{b 1 2})
{1, 2, 'foo'}
=> (deftag "[b" [x] "hello")
<function <lambda> at 0x7f913d8e0048>
=> (print #[b 1 2])
[…] NameError: name '#' is not defined
So the effect of this change is that a tag macro's name can't begin with an asterisk, the same way it can't begin with `{` or `[`. Because such a macro would look like unpacking, it's a bad idea anyway.
Kodiologist: > But it should be fine to put it directly before a list with no space `#*[...`, likewise, a `#**` would work on a dict display `#**{...`, but not before a symbol.
Beyond testing Hy itself, why would you construct a literal list or dictionary only to unpack it?
gilch: > Beyond testing Hy itself, why would you construct a literal list or dictionary only to unpack it?
I'm saying it should be valid syntactically, not that I had a use case in mind. But think about how this could interact with the threading macros `->` and `->>`. Could you thread in multiple arguments at once? Macros can do interesting things.
In the case of a dictionary, even in Python, you can use dictionary unpacking to put the kwarg name in a variable.
```
>>> def bar(*, a=None, b=None):
print(a,b)
>>> foo = 'a'
>>> bar(**{foo: 1})
1 None
>>> foo = 'b'
>>> bar(**{foo: 1})
None 1
```
Kodiologist: >
In the case of a dictionary, even in Python, you can use dictionary unpacking to put the kwarg name in a variable.
Oh, that's a good point.
gilch: > Not if you want to be able to call it.
I said any symbol, not any string. I meant the kind of symbol you can type in directly, without going through a `HySymbol` call to convert a string. Those can't contain things like `[` or `{`. See also #1117.
We do have a `*map` symbol in core. We also have the `*earmuffs*` convention. It's not that weird for a symbol to start with an asterisk in Lisp.
Kodiologist: > > Not if you want to be able to call it.
>
> I said any symbol, not any string. I meant the kind of symbol you can type in directly, without going through a `HySymbol` call to convert a string.
Okay, here's another counterexample:
=> (deftag "!a" [x] "hello")
<function <lambda> at 0x7f83b0b8ef28>
=> #!a 3
[Returns None]
=> (repr #!a 3)
File "<stdin>", line 1, column 7
(repr #!a 3)
^
LexException: Ran into a HASHBANG where it wasn't expected.
gilch: > HASHBANG
Yikes! I'd call that one a bug in the lexer. I think we were only using that for the shebang on the first line? We should be able to use `#!` as a tag macro elsewhere.
Kodiologist: So, to be clear, you want `#*b` to be parsed as calling a tag macro named `b` instead of the same way as `#* b`?
gilch: It seems much more consistent to me to implement `#*` and `#**` as tag macros that happen to expand into the new special forms `unpack-iterable`, and `unpack-mapping`, respectively, without messing with the current lexer at all. (I mean, besides fixing that HASHBANG thing in a separate issue.)
gilch: `#*` and `#**` could be implemented like this:
```Hy
(deftag * [form] `(unpack-iterable ~form))
(deftag ** [form] `(unpack-mapping ~form))
```
Then you don't need the special case in the compiler, and don't have to change the lexer. You still need the new special forms.
> So, to be clear, you want `#*b` to be parsed as calling a tag macro named `b` instead of the same way as `#* b`?
No, `#*b` should be a tag macro named `*b`. So `#*b c` calls the `*b` tag macro with argument `c`. But `#* b c` calls the `*` tag macro with argument `b`.
Kodiologist: All right, that makes sense. | diff --git a/NEWS b/NEWS
index fe7ffb98..3bae0cae 100644
--- a/NEWS
+++ b/NEWS
@@ -2,6 +2,8 @@ Changes from 0.13.0
[ Language Changes ]
* `yield-from` is no longer supported under Python 2
+ * `apply` has been replaced with Python-style unpacking operators `#*` and
+ `#**` (e.g., `(f #* args #** kwargs)`)
* Single-character "sharp macros" changed to "tag macros", which can have
longer names
* Periods are no longer allowed in keywords
diff --git a/docs/language/api.rst b/docs/language/api.rst
index 5241b3ab..3904e863 100644
--- a/docs/language/api.rst
+++ b/docs/language/api.rst
@@ -154,41 +154,6 @@ it appends it as the last argument. The following code demonstrates this:
5 10
-apply
------
-
-``apply`` is used to apply an optional list of arguments and an
-optional dictionary of kwargs to a function. The symbol mangling
-transformations will be applied to all keys in the dictionary of
-kwargs, provided the dictionary and its keys are defined in-place.
-
-Usage: ``(apply fn-name [args] [kwargs])``
-
-Examples:
-
-.. code-block:: clj
-
- (defn thunk []
- "hy there")
-
- (apply thunk)
- ;=> "hy there"
-
- (defn total-purchase [price amount &optional [fees 1.05] [vat 1.1]]
- (* price amount fees vat))
-
- (apply total-purchase [10 15])
- ;=> 173.25
-
- (apply total-purchase [10 15] {"vat" 1.05})
- ;=> 165.375
-
- (apply total-purchase [] {"price" 10 "amount" 15 "vat" 1.05})
- ;=> 165.375
-
- (apply total-purchase [] {:price 10 :amount 15 :vat 1.05})
- ;=> 165.375
-
and
---
@@ -596,8 +561,8 @@ Parameters may have the following keywords in front of them:
parameter_1 1
parameter_2 2
- ; to avoid the mangling of '-' to '_', use apply:
- => (apply print-parameters [] {"parameter-1" 1 "parameter-2" 2})
+ ; to avoid the mangling of '-' to '_', use unpacking:
+ => (print-parameters #** {"parameter-1" 1 "parameter-2" 2})
parameter-1 1
parameter-2 2
@@ -634,19 +599,19 @@ Parameters may have the following keywords in front of them:
.. code-block:: clj
- => (defn compare [a b &kwonly keyfn [reverse false]]
+ => (defn compare [a b &kwonly keyfn [reverse False]]
... (setv result (keyfn a b))
... (if (not reverse)
... result
... (- result)))
- => (apply compare ["lisp" "python"]
- ... {"keyfn" (fn [x y]
- ... (reduce - (map (fn [s] (ord (first s))) [x y])))})
+ => (compare "lisp" "python"
+ ... :keyfn (fn [x y]
+ ... (reduce - (map (fn [s] (ord (first s))) [x y]))))
-4
- => (apply compare ["lisp" "python"]
- ... {"keyfn" (fn [x y]
+ => (compare "lisp" "python"
+ ... :keyfn (fn [x y]
... (reduce - (map (fn [s] (ord (first s))) [x y])))
- ... "reverse" True})
+ ... :reverse True)
4
.. code-block:: python
@@ -1576,6 +1541,49 @@ the given conditional is ``False``. The following shows the expansion of this ma
(do statement))
+unpack-iterable, unpack-mapping
+-------------------------------
+
+``unpack-iterable`` and ``unpack-mapping`` allow an iterable or mapping
+object (respectively) to provide positional or keywords arguments
+(respectively) to a function.
+
+.. code-block:: clj
+
+ => (defn f [a b c d] [a b c d])
+ => (f (unpack-iterable [1 2]) (unpack-mapping {"c" 3 "d" 4}))
+ [1, 2, 3, 4]
+
+``unpack-iterable`` is usually written with the shorthand ``#*``, and
+``unpack-mapping`` with ``#**``.
+
+.. code-block:: clj
+
+ => (f #* [1 2] #** {"c" 3 "d" 4})
+ [1, 2, 3, 4]
+
+With Python 3, you can unpack in an assignment list (:pep:`3132`).
+
+.. code-block:: clj
+
+ => (setv [a #* b c] [1 2 3 4 5])
+ => [a b c]
+ [1, [2, 3, 4], 5]
+
+With Python 3.5 or greater, unpacking is allowed in more contexts than just
+function calls, and you can unpack more than once in the same expression
+(:pep:`448`).
+
+.. code-block:: clj
+
+ => [#* [1 2] #* [3 4]]
+ [1, 2, 3, 4]
+ => {#** {1 2} #** {3 4}}
+ {1: 2, 3: 4}
+ => (f #* [1] #* [2] #** {"c" 3} #** {"d" 4})
+ [1, 2, 3, 4]
+
+
unquote
-------
diff --git a/docs/language/core.rst b/docs/language/core.rst
index 21c111b4..4983af9b 100644
--- a/docs/language/core.rst
+++ b/docs/language/core.rst
@@ -1216,9 +1216,9 @@ if *from-file* ends before a complete expression can be parsed.
=> (import io)
=> (def buffer (io.StringIO "(+ 2 2)\n(- 2 1)"))
- => (eval (apply read [] {"from_file" buffer}))
+ => (eval (read :from_file buffer))
4
- => (eval (apply read [] {"from_file" buffer}))
+ => (eval (read :from_file buffer))
1
=> ; assuming "example.hy" contains:
diff --git a/docs/tutorial.rst b/docs/tutorial.rst
index ceeab775..3883abe6 100644
--- a/docs/tutorial.rst
+++ b/docs/tutorial.rst
@@ -423,8 +423,7 @@ The same thing in Hy::
=> (optional-arg 1 2 3 4)
[1 2 3 4]
-If you're running a version of Hy past 0.10.1 (eg, git master),
-there's also a nice new keyword argument syntax::
+You can call keyword arguments like this::
=> (optional-arg :keyword1 1
... :pos2 2
@@ -432,21 +431,13 @@ there's also a nice new keyword argument syntax::
... :keyword2 4)
[3, 2, 1, 4]
-Otherwise, you can always use `apply`. But what's `apply`?
-
-Are you familiar with passing in `*args` and `**kwargs` in Python?::
-
- >>> args = [1 2]
- >>> kwargs = {"keyword2": 3
- ... "keyword1": 4}
- >>> optional_arg(*args, **kwargs)
-
-We can reproduce this with `apply`::
+You can unpack arguments with the syntax ``#* args`` and ``#** kwargs``,
+similar to `*args` and `**kwargs` in Python::
=> (setv args [1 2])
=> (setv kwargs {"keyword2" 3
... "keyword1" 4})
- => (apply optional-arg args kwargs)
+ => (optional-arg #* args #** kwargs)
[1, 2, 4, 3]
There's also a dictionary-style keyword arguments construction that
@@ -460,7 +451,7 @@ looks like:
The difference here is that since it's a dictionary, you can't rely on
any specific ordering to the arguments.
-Hy also supports ``*args`` and ``**kwargs``. In Python::
+Hy also supports ``*args`` and ``**kwargs`` in parameter lists. In Python::
def some_func(foo, bar, *args, **kwargs):
import pprint
diff --git a/hy/compiler.py b/hy/compiler.py
index b9903324..5fd073fd 100755
--- a/hy/compiler.py
+++ b/hy/compiler.py
@@ -359,6 +359,13 @@ def checkargs(exact=None, min=None, max=None, even=None, multiple=None):
return _dec
+def is_unpack(kind, x):
+ return (isinstance(x, HyExpression)
+ and len(x) > 0
+ and isinstance(x[0], HySymbol)
+ and x[0] == "unpack_" + kind)
+
+
class HyASTCompiler(object):
def __init__(self, module_name):
@@ -441,7 +448,8 @@ class HyASTCompiler(object):
raise HyCompileError(Exception("Unknown type: `%s'" % _type))
- def _compile_collect(self, exprs, with_kwargs=False):
+ def _compile_collect(self, exprs, with_kwargs=False, dict_display=False,
+ oldpy_unpack=False):
"""Collect the expression contexts from a list of compiled expression.
This returns a list of the expression contexts, and the sum of the
@@ -451,10 +459,39 @@ class HyASTCompiler(object):
compiled_exprs = []
ret = Result()
keywords = []
+ oldpy_starargs = None
+ oldpy_kwargs = None
exprs_iter = iter(exprs)
for expr in exprs_iter:
- if with_kwargs and isinstance(expr, HyKeyword):
+
+ if not PY35 and oldpy_unpack and is_unpack("iterable", expr):
+ if oldpy_starargs:
+ raise HyTypeError(expr, "Pythons < 3.5 allow only one "
+ "`unpack-iterable` per call")
+ oldpy_starargs = self.compile(expr[1])
+ ret += oldpy_starargs
+ oldpy_starargs = oldpy_starargs.force_expr
+
+ elif is_unpack("mapping", expr):
+ ret += self.compile(expr[1])
+ if PY35:
+ if dict_display:
+ compiled_exprs.append(None)
+ compiled_exprs.append(ret.force_expr)
+ elif with_kwargs:
+ keywords.append(ast.keyword(
+ arg=None,
+ value=ret.force_expr,
+ lineno=expr.start_line,
+ col_offset=expr.start_column))
+ elif oldpy_unpack:
+ if oldpy_kwargs:
+ raise HyTypeError(expr, "Pythons < 3.5 allow only one "
+ "`unpack-mapping` per call")
+ oldpy_kwargs = ret.force_expr
+
+ elif with_kwargs and isinstance(expr, HyKeyword):
try:
value = next(exprs_iter)
except StopIteration:
@@ -474,11 +511,15 @@ class HyASTCompiler(object):
value=compiled_value.force_expr,
lineno=expr.start_line,
col_offset=expr.start_column))
+
else:
ret += self.compile(expr)
compiled_exprs.append(ret.force_expr)
- return compiled_exprs, ret, keywords
+ if oldpy_unpack:
+ return compiled_exprs, ret, keywords, oldpy_starargs, oldpy_kwargs
+ else:
+ return compiled_exprs, ret, keywords
def _compile_branch(self, exprs):
return _branch(self.compile(expr) for expr in exprs)
@@ -610,6 +651,9 @@ class HyASTCompiler(object):
new_name = ast.Subscript(value=name.value, slice=name.slice)
elif isinstance(name, ast.Attribute):
new_name = ast.Attribute(value=name.value, attr=name.attr)
+ elif PY3 and isinstance(name, ast.Starred):
+ new_name = ast.Starred(
+ value=self._storeize(expr, name.value, func))
else:
raise HyTypeError(expr,
"Can't assign or delete a %s" %
@@ -717,6 +761,23 @@ class HyASTCompiler(object):
raise HyTypeError(expr,
"`%s' can't be used at the top-level" % expr[0])
+ @builds("unpack_iterable")
+ @checkargs(exact=1)
+ def compile_unpack_iterable(self, expr):
+ if not PY3:
+ raise HyTypeError(expr, "`unpack-iterable` isn't allowed here")
+ ret = self.compile(expr[1])
+ ret += ast.Starred(value=ret.force_expr,
+ lineno=expr.start_line,
+ col_offset=expr.start_column,
+ ctx=ast.Load())
+ return ret
+
+ @builds("unpack_mapping")
+ @checkargs(exact=1)
+ def compile_unpack_mapping(self, expr):
+ raise HyTypeError(expr, "`unpack-mapping` isn't allowed here")
+
@builds("do")
def compile_do(self, expression):
expression.pop(0)
@@ -1526,115 +1587,6 @@ class HyASTCompiler(object):
generators=expr.generators)
return ret
- @builds("apply")
- @checkargs(min=1, max=3)
- def compile_apply_expression(self, expr):
- expr.pop(0) # apply
-
- ret = Result()
-
- fun = expr.pop(0)
-
- # We actually defer the compilation of the function call to
- # @builds(HyExpression), allowing us to work on method calls
- call = HyExpression([fun]).replace(fun)
-
- if isinstance(fun, HySymbol) and fun.startswith("."):
- # (apply .foo lst) needs to work as lst[0].foo(*lst[1:])
- if not expr:
- raise HyTypeError(
- expr, "apply of a method needs to have an argument"
- )
-
- # We need to grab the arguments, and split them.
-
- # Assign them to a variable if they're not one already
- if type(expr[0]) == HyList:
- if len(expr[0]) == 0:
- raise HyTypeError(
- expr, "apply of a method needs to have an argument"
- )
- call.append(expr[0].pop(0))
- else:
- if isinstance(expr[0], HySymbol):
- tempvar = expr[0]
- else:
- tempvar = HySymbol(self.get_anon_var()).replace(expr[0])
- assignment = HyExpression(
- [HySymbol("setv"), tempvar, expr[0]]
- ).replace(expr[0])
-
- # and add the assignment to our result
- ret += self.compile(assignment)
-
- # The first argument is the object on which to call the method
- # So we translate (apply .foo args) to (.foo (get args 0))
- call.append(HyExpression(
- [HySymbol("get"), tempvar, HyInteger(0)]
- ).replace(tempvar))
-
- # We then pass the other arguments to the function
- expr[0] = HyExpression(
- [HySymbol("cut"), tempvar, HyInteger(1)]
- ).replace(expr[0])
-
- ret += self.compile(call)
-
- if not isinstance(ret.expr, ast.Call):
- raise HyTypeError(
- fun, "compiling the application of `{}' didn't return a "
- "function call, but `{}'".format(fun, type(ret.expr).__name__)
- )
- if ret.expr.starargs or ret.expr.kwargs:
- raise HyTypeError(
- expr, "compiling the function application returned a function "
- "call with arguments"
- )
-
- if expr:
- stargs = expr.pop(0)
- if stargs is not None:
- stargs = self.compile(stargs)
- if PY35:
- stargs_expr = stargs.force_expr
- ret.expr.args.append(
- ast.Starred(stargs_expr, ast.Load(),
- lineno=stargs_expr.lineno,
- col_offset=stargs_expr.col_offset)
- )
- else:
- ret.expr.starargs = stargs.force_expr
- ret = stargs + ret
-
- if expr:
- kwargs = expr.pop(0)
- if isinstance(kwargs, HyDict):
- new_kwargs = []
- for k, v in kwargs.items():
- if isinstance(k, HySymbol):
- pass
- elif isinstance(k, HyString):
- k = HyString(hy_symbol_mangle(str_type(k))).replace(k)
- elif isinstance(k, HyKeyword):
- sym = hy_symbol_mangle(str_type(k)[2:])
- k = HyString(sym).replace(k)
- new_kwargs += [k, v]
- kwargs = HyDict(new_kwargs).replace(kwargs)
-
- kwargs = self.compile(kwargs)
- if PY35:
- kwargs_expr = kwargs.force_expr
- ret.expr.keywords.append(
- ast.keyword(None, kwargs_expr,
- lineno=kwargs_expr.lineno,
- col_offset=kwargs_expr.col_offset)
- )
- else:
- ret.expr.kwargs = kwargs.force_expr
- ret = kwargs + ret
-
- return ret
-
@builds("not")
@builds("~")
@checkargs(1)
@@ -2001,9 +1953,15 @@ class HyASTCompiler(object):
return self._compile_keyword_call(expression)
if isinstance(fn, HySymbol):
- ret = self.compile_atom(fn, expression)
- if ret:
- return ret
+ # First check if `fn` is a special form, unless it has an
+ # `unpack_iterable` in it, since Python's operators (`+`,
+ # etc.) can't unpack. An exception to this exception is that
+ # tuple literals (`,`) can unpack.
+ if fn == "," or not (
+ any(is_unpack("iterable", x) for x in expression[1:])):
+ ret = self.compile_atom(fn, expression)
+ if ret:
+ return ret
if fn.startswith("."):
# (.split "test test") -> "test test".split()
@@ -2054,14 +2012,14 @@ class HyASTCompiler(object):
else:
with_kwargs = True
- args, ret, kwargs = self._compile_collect(expression[1:],
- with_kwargs)
+ args, ret, keywords, oldpy_starargs, oldpy_kwargs = self._compile_collect(
+ expression[1:], with_kwargs, oldpy_unpack=True)
ret += ast.Call(func=func.expr,
args=args,
- keywords=kwargs,
- starargs=None,
- kwargs=None,
+ keywords=keywords,
+ starargs=oldpy_starargs,
+ kwargs=oldpy_kwargs,
lineno=expression.start_line,
col_offset=expression.start_column)
@@ -2583,7 +2541,7 @@ class HyASTCompiler(object):
@builds(HyDict)
def compile_dict(self, m):
- keyvalues, ret, _ = self._compile_collect(m)
+ keyvalues, ret, _ = self._compile_collect(m, dict_display=True)
ret += ast.Dict(lineno=m.start_line,
col_offset=m.start_column,
diff --git a/hy/contrib/hy_repr.hy b/hy/contrib/hy_repr.hy
index 20535044..ea67efc4 100644
--- a/hy/contrib/hy_repr.hy
+++ b/hy/contrib/hy_repr.hy
@@ -37,6 +37,10 @@
(+ "~" (f (second x) q))
(= (first x) 'unquote_splice)
(+ "~@" (f (second x) q))
+ (= (first x) 'unpack_iterable)
+ (+ "#* " (f (second x) q))
+ (= (first x) 'unpack_mapping)
+ (+ "#** " (f (second x) q))
; else
(+ "(" (catted) ")"))
(+ "(" (catted) ")"))
diff --git a/hy/contrib/loop.hy b/hy/contrib/loop.hy
index 9ab9eb8d..34172569 100644
--- a/hy/contrib/loop.hy
+++ b/hy/contrib/loop.hy
@@ -27,7 +27,7 @@
(when (not (first active))
(assoc active 0 True)
(while (> (len accumulated) 0)
- (setv result (apply f (.pop accumulated))))
+ (setv result (f #* (.pop accumulated))))
(assoc active 0 False)
result)))
diff --git a/hy/contrib/multi.hy b/hy/contrib/multi.hy
index 639c96ad..8c894861 100644
--- a/hy/contrib/multi.hy
+++ b/hy/contrib/multi.hy
@@ -29,7 +29,7 @@
(setv output None)
(for [[i f] (.items (get self._fns self.f.__module__ self.f.__name__))]
(when (.fn? self i args kwargs)
- (setv output (apply f args kwargs))
+ (setv output (f #* args #** kwargs))
(break)))
(if output
output
@@ -37,10 +37,10 @@
(defn multi-decorator [dispatch-fn]
(setv inner (fn [&rest args &kwargs kwargs]
- (setv dispatch-key (apply dispatch-fn args kwargs))
+ (setv dispatch-key (dispatch-fn #* args #** kwargs))
(if (in dispatch-key inner.--multi--)
- (apply (get inner.--multi-- dispatch-key) args kwargs)
- (apply inner.--multi-default-- args kwargs))))
+ ((get inner.--multi-- dispatch-key) #* args #** kwargs)
+ (inner.--multi-default-- #* args #** kwargs))))
(setv inner.--multi-- {})
(setv inner.--doc-- dispatch-fn.--doc--)
(setv inner.--multi-default-- (fn [&rest args &kwargs kwargs] None))
diff --git a/hy/contrib/profile.hy b/hy/contrib/profile.hy
index afee3b07..42ef1a48 100644
--- a/hy/contrib/profile.hy
+++ b/hy/contrib/profile.hy
@@ -10,7 +10,7 @@
`(do
(import [pycallgraph [PyCallGraph]]
[pycallgraph.output [GraphvizOutput]])
- (with* [(apply PyCallGraph [] {"output" (GraphvizOutput)})]
+ (with* [(PyCallGraph :output (GraphvizOutput)))]
~@body)))
@@ -29,6 +29,6 @@
(.disable ~g!hy-pr)
(setv ~g!hy-s (StringIO))
(setv ~g!hy-ps
- (.sort-stats (apply pstats.Stats [~g!hy-pr] {"stream" ~g!hy-s})))
+ (.sort-stats (pstats.Stats ~g!hy-pr :stream ~g!hy-s)))
(.print-stats ~g!hy-ps)
(print (.getvalue ~g!hy-s))))
diff --git a/hy/core/language.hy b/hy/core/language.hy
index 6bcdb793..9c29d276 100644
--- a/hy/core/language.hy
+++ b/hy/core/language.hy
@@ -37,7 +37,7 @@
first-f (next rfs)
fs (tuple rfs))
(fn [&rest args &kwargs kwargs]
- (setv res (apply first-f args kwargs))
+ (setv res (first-f #* args #** kwargs))
(for* [f fs]
(setv res (f res)))
res))))
@@ -45,7 +45,7 @@
(defn complement [f]
"Create a function that reverses truth value of another function"
(fn [&rest args &kwargs kwargs]
- (not (apply f args kwargs))))
+ (not (f #* args #** kwargs))))
(defn cons [a b]
"Return a fresh cons cell with car = a and cdr = b"
@@ -160,8 +160,8 @@
(defn drop-last [n coll]
"Return a sequence of all but the last n elements in coll."
(setv iters (tee coll))
- (map first (apply zip [(get iters 0)
- (drop n (get iters 1))])))
+ (map first (zip #* [(get iters 0)
+ (drop n (get iters 1))])))
(defn empty? [coll]
"Return True if `coll` is empty"
@@ -250,7 +250,7 @@
(defn interleave [&rest seqs]
"Return an iterable of the first item in each of seqs, then the second etc."
- (chain.from-iterable (apply zip seqs)))
+ (chain.from-iterable (zip #* seqs)))
(defn interpose [item seq]
"Return an iterable of the elements of seq separated by item"
@@ -275,7 +275,7 @@
set of arguments and collects the results into a list."
(setv fs (cons f fs))
(fn [&rest args &kwargs kwargs]
- (list-comp (apply f args kwargs) [f fs])))
+ (list-comp (f #* args #** kwargs) [f fs])))
(defn last [coll]
"Return last item from `coll`"
@@ -285,7 +285,7 @@
"Return a dotted list construed from the elements of the argument"
(if (not tl)
hd
- (cons hd (apply list* tl))))
+ (cons hd (list* #* tl))))
(defn macroexpand [form]
"Return the full macro expansion of form"
@@ -350,8 +350,8 @@
slices (genexpr (islice (get coll-clones start) start None step)
[start (range n)]))
(if (is fillvalue -sentinel)
- (apply zip slices)
- (apply zip-longest slices {"fillvalue" fillvalue})))
+ (zip #* slices)
+ (zip-longest #* slices :fillvalue fillvalue)))
(defn pos? [n]
"Return true if n is > 0"
diff --git a/hy/core/macros.hy b/hy/core/macros.hy
index 27eadf63..82cf3ae4 100644
--- a/hy/core/macros.hy
+++ b/hy/core/macros.hy
@@ -207,7 +207,7 @@
(setv retval (gensym))
`(when (= --name-- "__main__")
(import sys)
- (setv ~retval (apply (fn [~@args] ~@body) sys.argv))
+ (setv ~retval ((fn [~@args] ~@body) #* sys.argv))
(if (integer? ~retval)
(sys.exit ~retval))))
diff --git a/hy/lex/lexer.py b/hy/lex/lexer.py
index c6b5636b..9779eda7 100755
--- a/hy/lex/lexer.py
+++ b/hy/lex/lexer.py
@@ -26,6 +26,7 @@ lg.add('QUASIQUOTE', r'`%s' % end_quote)
lg.add('UNQUOTESPLICE', r'~@%s' % end_quote)
lg.add('UNQUOTE', r'~%s' % end_quote)
lg.add('HASHBANG', r'#!.*[^\r\n]')
+lg.add('HASHSTARS', r'#\*+')
lg.add('HASHOTHER', r'#%s' % identifier)
# A regexp which matches incomplete strings, used to support
diff --git a/hy/lex/parser.py b/hy/lex/parser.py
index 1be896b2..d60cec5a 100755
--- a/hy/lex/parser.py
+++ b/hy/lex/parser.py
@@ -197,6 +197,22 @@ def term_unquote_splice(p):
return HyExpression([HySymbol("unquote_splice"), p[1]])
[email protected]("term : HASHSTARS term")
+@set_quote_boundaries
+def term_hashstars(p):
+ n_stars = len(p[0].getstr()[1:])
+ if n_stars == 1:
+ sym = "unpack_iterable"
+ elif n_stars == 2:
+ sym = "unpack_mapping"
+ else:
+ raise LexException(
+ "Too many stars in `#*` construct (if you want to unpack a symbol "
+ "beginning with a star, separate it with whitespace)",
+ p[0].source_pos.lineno, p[0].source_pos.colno)
+ return HyExpression([HySymbol(sym), p[1]])
+
+
@pg.production("term : HASHOTHER term")
@set_quote_boundaries
def hash_other(p):
| apply doesn't use shadowed operators
Now this is possible:
``` clj
(let [[op +]] (apply op ["a" "b" "c"]))
```
and also this:
``` clj
(reduce + ["a" "b" "c"])
```
However this is not:
``` clj
(apply + ["a" "b" "c"])
```
which yields ``HyTypeError: compiling the application of `+' didn't return a function call, but `BinOp'``
@paultag **says**:
Since it defers back to compile expression, it'll defer to @builds("+"), which means we get a BinOp back. Which is *hilarious*. At the top there should be a self.compile call since it'll actually treat the first argument as the argument in the call position when it needs to be an ast.Call.
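For context, here is a rough Python analogy of the type mismatch described above (illustrative only, not Hy's compiler): the bare operator parses to a `BinOp` node, while `apply` needs an `ast.Call` to attach its arguments to, and only the shadowed, function version of the operator can be passed around as a value.
```python
import ast
import operator
from functools import reduce

# `"a" + "b"` becomes a BinOp node, not a Call node, so there is no function
# call for `apply` to splat its arguments into:
print(type(ast.parse('"a" + "b"', mode="eval").body).__name__)     # BinOp
print(type(ast.parse('op("a", "b")', mode="eval").body).__name__)  # Call

# The shadowed operator is an ordinary function object, which is why the
# higher-order use with reduce above works:
print(reduce(operator.add, ["a", "b", "c"]))  # abc
```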
| hylang/hy | diff --git a/conftest.py b/conftest.py
index 72fa5530..0735cf74 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,12 +1,13 @@
import _pytest
import hy
-from hy._compat import PY3
+from hy._compat import PY3, PY35
def pytest_collect_file(parent, path):
if (path.ext == ".hy"
and "/tests/native_tests/" in path.dirname + "/"
and path.basename != "__init__.hy"
- and not ("py3_only" in path.basename and not PY3)):
+ and not ("py3_only" in path.basename and not PY3)
+ and not ("py35_only" in path.basename and not PY35)):
m = _pytest.python.pytest_pycollect_makemodule(path, parent)
# Spoof the module name to avoid hitting an assertion in pytest.
m.name = m.name[:-len(".hy")] + ".py"
diff --git a/tests/native_tests/contrib/hy_repr.hy b/tests/native_tests/contrib/hy_repr.hy
index 9bbdd095..a9c04755 100644
--- a/tests/native_tests/contrib/hy_repr.hy
+++ b/tests/native_tests/contrib/hy_repr.hy
@@ -28,8 +28,8 @@
[1 2 3] (, 1 2 3) #{1 2 3} (frozenset #{1 2 3})
'[1 2 3] '(, 1 2 3) '#{1 2 3} '(frozenset #{1 2 3})
{"a" 1 "b" 2 "a" 3} '{"a" 1 "b" 2 "a" 3}
- [1 [2 3] (, 4 (, 'mysymbol :mykeyword)) {"a" b"hello"}]
- '[1 [2 3] (, 4 (, mysymbol :mykeyword)) {"a" b"hello"}]])
+ [1 [2 3] (, 4 (, 'mysymbol :mykeyword)) {"a" b"hello"} '(f #* a #** b)]
+ '[1 [2 3] (, 4 (, mysymbol :mykeyword)) {"a" b"hello"} (f #* a #** b)]])
(for [original-val values]
(setv evaled (eval (read-str (hy-repr original-val))))
(assert (= evaled original-val))
@@ -59,7 +59,8 @@
"{1 20}"
"'{1 10 1 20}"
"'asymbol"
- ":akeyword"])
+ ":akeyword"
+ "'(f #* args #** kwargs)"])
(for [original-str strs]
(setv rep (hy-repr (eval (read-str original-str))))
(assert (= rep original-str))))
diff --git a/tests/native_tests/contrib/multi.hy b/tests/native_tests/contrib/multi.hy
index 90c85dcc..9b0294ca 100644
--- a/tests/native_tests/contrib/multi.hy
+++ b/tests/native_tests/contrib/multi.hy
@@ -95,9 +95,9 @@
([&optional [a "nop"] [b "p"]] (+ a b)))
(assert (= (fun 1) 1))
- (assert (= (apply fun [] {"a" "t"}) "t"))
- (assert (= (apply fun ["hello "] {"b" "world"}) "hello world"))
- (assert (= (apply fun [] {"a" "hello " "b" "world"}) "hello world")))
+ (assert (= (fun :a "t") "t"))
+ (assert (= (fun "hello " :b "world") "hello world"))
+ (assert (= (fun :a "hello " :b "world") "hello world")))
(defn test-docs []
diff --git a/tests/native_tests/language.hy b/tests/native_tests/language.hy
index cbef01e6..51e221fd 100644
--- a/tests/native_tests/language.hy
+++ b/tests/native_tests/language.hy
@@ -369,37 +369,25 @@
(assert (is (isfile ".") False)))
+(defn test-star-unpacking []
+ ; Python 3-only forms of unpacking are in py3_only_tests.hy
+ (setv l [1 2 3])
+ (setv d {"a" "x" "b" "y"})
+ (defn fun [&optional x1 x2 x3 x4 a b c] [x1 x2 x3 x4 a b c])
+ (assert (= (fun 5 #* l) [5 1 2 3 None None None]))
+ (assert (= (+ #* l) 6))
+ (assert (= (fun 5 #** d) [5 None None None "x" "y" None]))
+ (assert (= (fun 5 #* l #** d) [5 1 2 3 "x" "y" None])))
+
+
+
(defn test-kwargs []
"NATIVE: test kwargs things."
- (assert (= (apply kwtest [] {"one" "two"}) {"one" "two"}))
+ (assert (= (kwtest :one "two") {"one" "two"}))
(setv mydict {"one" "three"})
- (assert (= (apply kwtest [] mydict) mydict))
- (assert (= (apply kwtest [] ((fn [] {"one" "two"}))) {"one" "two"})))
-
-
-(defn test-apply []
- "NATIVE: test working with args and functions"
- (defn sumit [a b c] (+ a b c))
- (assert (= (apply sumit [1] {"b" 2 "c" 3}) 6))
- (assert (= (apply sumit [1 2 2]) 5))
- (assert (= (apply sumit [] {"a" 1 "b" 1 "c" 2}) 4))
- (assert (= (apply sumit ((fn [] [1 1])) {"c" 1}) 3))
- (defn noargs [] [1 2 3])
- (assert (= (apply noargs) [1 2 3]))
- (defn sumit-mangle [an-a a-b a-c a-d] (+ an-a a-b a-c a-d))
- (def Z "a_d")
- (assert (= (apply sumit-mangle [] {"an-a" 1 :a-b 2 'a-c 3 Z 4}) 10)))
-
-
-(defn test-apply-with-methods []
- "NATIVE: test apply to call a method"
- (setv str "foo {bar}")
- (assert (= (apply .format [str] {"bar" "baz"})
- (apply .format ["foo {0}" "baz"])
- "foo baz"))
- (setv lst ["a {0} {1} {foo} {bar}" "b" "c"])
- (assert (= (apply .format lst {"foo" "d" "bar" "e"})
- "a b c d e")))
+ (assert (= (kwtest #** mydict) mydict))
+ (assert (= (kwtest #** ((fn [] {"one" "two"}))) {"one" "two"})))
+
(defn test-dotted []
@@ -418,20 +406,20 @@
(assert (= (.meth m) "meth"))
(assert (= (.meth m "foo" "bar") "meth foo bar"))
(assert (= (.meth :b "1" :a "2" m "foo" "bar") "meth foo bar 2 1"))
- (assert (= (apply .meth [m "foo" "bar"]) "meth foo bar"))
+ (assert (= (.meth m #* ["foo" "bar"]) "meth foo bar"))
(setv x.p m)
(assert (= (.p.meth x) "meth"))
(assert (= (.p.meth x "foo" "bar") "meth foo bar"))
(assert (= (.p.meth :b "1" :a "2" x "foo" "bar") "meth foo bar 2 1"))
- (assert (= (apply .p.meth [x "foo" "bar"]) "meth foo bar"))
+ (assert (= (.p.meth x #* ["foo" "bar"]) "meth foo bar"))
(setv x.a (X))
(setv x.a.b m)
(assert (= (.a.b.meth x) "meth"))
(assert (= (.a.b.meth x "foo" "bar") "meth foo bar"))
(assert (= (.a.b.meth :b "1" :a "2" x "foo" "bar") "meth foo bar 2 1"))
- (assert (= (apply .a.b.meth [x "foo" "bar"]) "meth foo bar"))
+ (assert (= (.a.b.meth x #* ["foo" "bar"]) "meth foo bar"))
(assert (is (.isdigit :foo) False)))
@@ -1173,8 +1161,8 @@
"NATIVE: test &key function arguments"
(defn foo [&key {"a" None "b" 1}] [a b])
(assert (= (foo) [None 1]))
- (assert (= (apply foo [] {"a" 2}) [2 1]))
- (assert (= (apply foo [] {"b" 42}) [None 42])))
+ (assert (= (foo :a 2) [2 1]))
+ (assert (= (foo :b 42) [None 42])))
(defn test-optional-arguments []
diff --git a/tests/native_tests/native_macros.hy b/tests/native_tests/native_macros.hy
index 3a0d4287..d3efcb6e 100644
--- a/tests/native_tests/native_macros.hy
+++ b/tests/native_tests/native_macros.hy
@@ -84,12 +84,12 @@
"NATIVE: test macro calling a plain function"
(assert (= 3 (bar 1 2))))
-(defn test-optional-and-apply-in-macro []
+(defn test-optional-and-unpacking-in-macro []
; https://github.com/hylang/hy/issues/1154
(defn f [&rest args]
(+ "f:" (repr args)))
(defmacro mac [&optional x]
- `(apply f [~x]))
+ `(f #* [~x]))
(assert (= (mac) "f:(None,)")))
(defn test-midtree-yield []
diff --git a/tests/native_tests/operators.hy b/tests/native_tests/operators.hy
index b49c754a..be40cc5a 100644
--- a/tests/native_tests/operators.hy
+++ b/tests/native_tests/operators.hy
@@ -288,8 +288,3 @@
(assert (is (f 3 [1 2]) (!= f-name "in")))
(assert (is (f 2 [1 2]) (= f-name "in")))
(forbid (f 2 [1 2] [3 4])))
-
-#@(pytest.mark.xfail
-(defn test-apply-op []
- ; https://github.com/hylang/hy/issues/647
- (assert (= (eval '(apply + ["a" "b" "c"])) "abc"))))
diff --git a/tests/native_tests/py35_only_tests.hy b/tests/native_tests/py35_only_tests.hy
new file mode 100644
index 00000000..716a9f80
--- /dev/null
+++ b/tests/native_tests/py35_only_tests.hy
@@ -0,0 +1,26 @@
+;; Copyright 2017 the authors.
+;; This file is part of Hy, which is free software licensed under the Expat
+;; license. See the LICENSE.
+
+;; Tests where the emitted code relies on Python ≥3.5.
+;; conftest.py skips this file when running on Python <3.5.
+
+
+(defn test-unpacking-pep448-1star []
+ (setv l [1 2 3])
+ (setv p [4 5])
+ (assert (= ["a" #*l "b" #*p #*l] ["a" 1 2 3 "b" 4 5 1 2 3]))
+ (assert (= (, "a" #*l "b" #*p #*l) (, "a" 1 2 3 "b" 4 5 1 2 3)))
+ (assert (= #{"a" #*l "b" #*p #*l} #{"a" "b" 1 2 3 4 5}))
+ (defn f [&rest args] args)
+ (assert (= (f "a" #*l "b" #*p #*l) (, "a" 1 2 3 "b" 4 5 1 2 3)))
+ (assert (= (+ #*l #*p) 15))
+ (assert (= (and #*l) 3)))
+
+
+(defn test-unpacking-pep448-2star []
+ (setv d1 {"a" 1 "b" 2})
+ (setv d2 {"c" 3 "d" 4})
+ (assert (= {1 "x" #**d1 #**d2 2 "y"} {"a" 1 "b" 2 "c" 3 "d" 4 1 "x" 2 "y"}))
+ (defn fun [&optional a b c d e f] [a b c d e f])
+ (assert (= (fun #**d1 :e "eee" #**d2) [1 2 3 4 "eee" None])))
diff --git a/tests/native_tests/py3_only_tests.hy b/tests/native_tests/py3_only_tests.hy
index b376f6cd..84b8053b 100644
--- a/tests/native_tests/py3_only_tests.hy
+++ b/tests/native_tests/py3_only_tests.hy
@@ -16,15 +16,15 @@
"NATIVE: test keyword-only arguments"
;; keyword-only with default works
(defn kwonly-foo-default-false [&kwonly [foo False]] foo)
- (assert (= (apply kwonly-foo-default-false) False))
- (assert (= (apply kwonly-foo-default-false [] {"foo" True}) True))
+ (assert (= (kwonly-foo-default-false) False))
+ (assert (= (kwonly-foo-default-false :foo True) True))
;; keyword-only without default ...
(defn kwonly-foo-no-default [&kwonly foo] foo)
(setv attempt-to-omit-default (try
(kwonly-foo-no-default)
(except [e [Exception]] e)))
;; works
- (assert (= (apply kwonly-foo-no-default [] {"foo" "quux"}) "quux"))
+ (assert (= (kwonly-foo-no-default :foo "quux") "quux"))
;; raises TypeError with appropriate message if not supplied
(assert (isinstance attempt-to-omit-default TypeError))
(assert (in "missing 1 required keyword-only argument: 'foo'"
@@ -32,11 +32,20 @@
;; keyword-only with other arg types works
(defn function-of-various-args [a b &rest args &kwonly foo &kwargs kwargs]
(, a b args foo kwargs))
- (assert (= (apply function-of-various-args
- [1 2 3 4] {"foo" 5 "bar" 6 "quux" 7})
+ (assert (= (function-of-various-args 1 2 3 4 :foo 5 :bar 6 :quux 7)
(, 1 2 (, 3 4) 5 {"bar" 6 "quux" 7}))))
+(defn test-extended-unpacking-1star-lvalues []
+ (setv [x #*y] [1 2 3 4])
+ (assert (= x 1))
+ (assert (= y [2 3 4]))
+ (setv [a #*b c] "ghijklmno")
+ (assert (= a "g"))
+ (assert (= b (list "hijklmn")))
+ (assert (= c "o")))
+
+
(defn test-yield-from []
"NATIVE: testing yield from"
(defn yield-from-test []
diff --git a/tests/native_tests/tag_macros.hy b/tests/native_tests/tag_macros.hy
index ba586f65..18beaa79 100644
--- a/tests/native_tests/tag_macros.hy
+++ b/tests/native_tests/tag_macros.hy
@@ -100,9 +100,8 @@
"Increments each argument passed to the decorated function."
((wraps func)
(fn [&rest args &kwargs kwargs]
- (apply func
- (map inc args)
- (dict-comp k (inc v) [[k v] (.items kwargs)])))))
+ (func #* (map inc args)
+ #** (dict-comp k (inc v) [[k v] (.items kwargs)])))))
#@(increment-arguments
(defn foo [&rest args &kwargs kwargs]
diff --git a/tests/test_bin.py b/tests/test_bin.py
index c2e31e96..6df4d437 100644
--- a/tests/test_bin.py
+++ b/tests/test_bin.py
@@ -7,7 +7,7 @@
import os
import subprocess
import re
-from hy._compat import PY3
+from hy._compat import PY3, PY35
from hy.importer import get_bytecode_path
import pytest
@@ -210,12 +210,13 @@ def test_hy2py():
if f.endswith(".hy"):
if f == "py3_only_tests.hy" and not PY3:
continue
- else:
- i += 1
- output, err = run_cmd("hy2py -s -a " +
- os.path.join(dirpath, f))
- assert len(output) > 1, f
- assert len(err) == 0, f
+ if f == "py35_only_tests.hy" and not PY35:
+ continue
+ i += 1
+ output, err = run_cmd("hy2py -s -a " +
+ os.path.join(dirpath, f))
+ assert len(output) > 1, f
+ assert len(err) == 0, f
assert i
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 13
} | 0.13 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
args==0.1.0
astor==0.8.1
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
clint==0.5.1
-e git+https://github.com/hylang/hy.git@c3c7af2db3e6314fc9503ead68b677daf6a4f089#egg=hy
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
rply==0.7.8
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: hy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- args==0.1.0
- astor==0.8.1
- clint==0.5.1
- rply==0.7.8
prefix: /opt/conda/envs/hy
| [
"tests/native_tests/contrib/hy_repr.hy::test_hy_repr_roundtrip_from_value",
"tests/native_tests/contrib/hy_repr.hy::test_hy_repr_roundtrip_from_str",
"tests/native_tests/contrib/hy_repr.hy::test_hy_model_constructors",
"tests/native_tests/contrib/hy_repr.hy::test_hy_repr_self_reference",
"tests/native_tests/contrib/hy_repr.hy::test_hy_repr_dunder_method",
"tests/native_tests/contrib/hy_repr.hy::test_hy_repr_fallback",
"tests/native_tests/contrib/multi.hy::test_different_signatures",
"tests/native_tests/contrib/multi.hy::test_basic_dispatch",
"tests/native_tests/contrib/multi.hy::test_kwargs_handling",
"tests/native_tests/contrib/multi.hy::test_basic_multi",
"tests/native_tests/contrib/multi.hy::test_kw_args",
"tests/native_tests/contrib/multi.hy::test_docs",
"tests/native_tests/language.hy::test_sys_argv",
"tests/native_tests/language.hy::test_hex",
"tests/native_tests/language.hy::test_octal",
"tests/native_tests/language.hy::test_binary",
"tests/native_tests/language.hy::test_fractions",
"tests/native_tests/language.hy::test_lists",
"tests/native_tests/language.hy::test_dicts",
"tests/native_tests/language.hy::test_sets",
"tests/native_tests/language.hy::test_setv_get",
"tests/native_tests/language.hy::test_setv_builtin",
"tests/native_tests/language.hy::test_setv_pairs",
"tests/native_tests/language.hy::test_setv_returns_none",
"tests/native_tests/language.hy::test_store_errors",
"tests/native_tests/language.hy::test_no_str_as_sym",
"tests/native_tests/language.hy::test_fn_corner_cases",
"tests/native_tests/language.hy::test_alias_names_in_errors",
"tests/native_tests/language.hy::test_for_loop",
"tests/native_tests/language.hy::test_nasty_for_nesting",
"tests/native_tests/language.hy::test_while_loop",
"tests/native_tests/language.hy::test_branching",
"tests/native_tests/language.hy::test_branching_with_do",
"tests/native_tests/language.hy::test_branching_expr_count_with_do",
"tests/native_tests/language.hy::test_cond",
"tests/native_tests/language.hy::test_if",
"tests/native_tests/language.hy::test_index",
"tests/native_tests/language.hy::test_fn",
"tests/native_tests/language.hy::test_imported_bits",
"tests/native_tests/language.hy::test_star_unpacking",
"tests/native_tests/language.hy::test_kwargs",
"tests/native_tests/language.hy::test_dotted",
"tests/native_tests/language.hy::test_do",
"tests/native_tests/language.hy::test_exceptions",
"tests/native_tests/language.hy::test_earmuffs",
"tests/native_tests/language.hy::test_threading",
"tests/native_tests/language.hy::test_tail_threading",
"tests/native_tests/language.hy::test_threading_two",
"tests/native_tests/language.hy::test_as_threading",
"tests/native_tests/language.hy::test_assoc",
"tests/native_tests/language.hy::test_multiassoc",
"tests/native_tests/language.hy::test_pass",
"tests/native_tests/language.hy::test_yield",
"tests/native_tests/language.hy::test_yield_with_return",
"tests/native_tests/language.hy::test_yield_in_try",
"tests/native_tests/language.hy::test_first",
"tests/native_tests/language.hy::test_cut",
"tests/native_tests/language.hy::test_rest",
"tests/native_tests/language.hy::test_importas",
"tests/native_tests/language.hy::test_context",
"tests/native_tests/language.hy::test_context_yield",
"tests/native_tests/language.hy::test_with_return",
"tests/native_tests/language.hy::test_for_doodle",
"tests/native_tests/language.hy::test_for_else",
"tests/native_tests/language.hy::test_list_comprehensions",
"tests/native_tests/language.hy::test_set_comprehensions",
"tests/native_tests/language.hy::test_dict_comprehensions",
"tests/native_tests/language.hy::test_generator_expressions",
"tests/native_tests/language.hy::test_defn_order",
"tests/native_tests/language.hy::test_defn_return",
"tests/native_tests/language.hy::test_defn_lambdakey",
"tests/native_tests/language.hy::test_defn_do",
"tests/native_tests/language.hy::test_defn_do_return",
"tests/native_tests/language.hy::test_defn_dunder_name",
"tests/native_tests/language.hy::test_mangles",
"tests/native_tests/language.hy::test_fn_return",
"tests/native_tests/language.hy::test_if_mangler",
"tests/native_tests/language.hy::test_nested_mangles",
"tests/native_tests/language.hy::test_symbol_utf_8",
"tests/native_tests/language.hy::test_symbol_dash",
"tests/native_tests/language.hy::test_symbol_question_mark",
"tests/native_tests/language.hy::test_and",
"tests/native_tests/language.hy::test_and_#1151_do",
"tests/native_tests/language.hy::test_and_#1151_for",
"tests/native_tests/language.hy::test_and_#1151_del",
"tests/native_tests/language.hy::test_or",
"tests/native_tests/language.hy::test_or_#1151_do",
"tests/native_tests/language.hy::test_or_#1151_for",
"tests/native_tests/language.hy::test_or_#1151_del",
"tests/native_tests/language.hy::test_xor",
"tests/native_tests/language.hy::test_if_return_branching",
"tests/native_tests/language.hy::test_keyword",
"tests/native_tests/language.hy::test_keyword_clash",
"tests/native_tests/language.hy::test_empty_keyword",
"tests/native_tests/language.hy::test_nested_if",
"tests/native_tests/language.hy::test_eval",
"tests/native_tests/language.hy::test_eval_false",
"tests/native_tests/language.hy::test_eval_globals",
"tests/native_tests/language.hy::test_eval_failure",
"tests/native_tests/language.hy::test_eval_quasiquote",
"tests/native_tests/language.hy::test_import_syntax",
"tests/native_tests/language.hy::test_lambda_keyword_lists",
"tests/native_tests/language.hy::test_key_arguments",
"tests/native_tests/language.hy::test_optional_arguments",
"tests/native_tests/language.hy::test_undefined_name",
"tests/native_tests/language.hy::test_if_in_if",
"tests/native_tests/language.hy::test_try_except_return",
"tests/native_tests/language.hy::test_try_else_return",
"tests/native_tests/language.hy::test_require",
"tests/native_tests/language.hy::test_require_native",
"tests/native_tests/language.hy::test_encoding_nightmares",
"tests/native_tests/language.hy::test_keyword_dict_access",
"tests/native_tests/language.hy::test_break_breaking",
"tests/native_tests/language.hy::test_continue_continuation",
"tests/native_tests/language.hy::test_empty_list",
"tests/native_tests/language.hy::test_string",
"tests/native_tests/language.hy::test_del",
"tests/native_tests/language.hy::test_macroexpand",
"tests/native_tests/language.hy::test_macroexpand_1",
"tests/native_tests/language.hy::test_merge_with",
"tests/native_tests/language.hy::test_calling_module_name",
"tests/native_tests/language.hy::test_attribute_access",
"tests/native_tests/language.hy::test_keyword_quoting",
"tests/native_tests/language.hy::test_only_parse_lambda_list_in_defn",
"tests/native_tests/language.hy::test_read",
"tests/native_tests/language.hy::test_read_str",
"tests/native_tests/language.hy::test_keyword_creation",
"tests/native_tests/language.hy::test_name_conversion",
"tests/native_tests/language.hy::test_keywords",
"tests/native_tests/language.hy::test_keywords_and_macros",
"tests/native_tests/language.hy::test_argument_destr",
"tests/native_tests/native_macros.hy::test_rev_macro",
"tests/native_tests/native_macros.hy::test_macro_kw",
"tests/native_tests/native_macros.hy::test_fn_calling_macro",
"tests/native_tests/native_macros.hy::test_optional_and_unpacking_in_macro",
"tests/native_tests/native_macros.hy::test_midtree_yield",
"tests/native_tests/native_macros.hy::test_midtree_yield_in_for",
"tests/native_tests/native_macros.hy::test_midtree_yield_in_while",
"tests/native_tests/native_macros.hy::test_multi_yield",
"tests/native_tests/native_macros.hy::test_if_python2",
"tests/native_tests/native_macros.hy::test_gensym_in_macros",
"tests/native_tests/native_macros.hy::test_with_gensym",
"tests/native_tests/native_macros.hy::test_defmacro_g_bang",
"tests/native_tests/native_macros.hy::test_defmacro_bang",
"tests/native_tests/native_macros.hy::test_if_not",
"tests/native_tests/native_macros.hy::test_lif",
"tests/native_tests/native_macros.hy::test_lif_not",
"tests/native_tests/native_macros.hy::test_defmain",
"tests/native_tests/operators.hy::test_operator_+_real",
"tests/native_tests/operators.hy::test_operator_+_shadow",
"tests/native_tests/operators.hy::test_operator_-_real",
"tests/native_tests/operators.hy::test_operator_-_shadow",
"tests/native_tests/operators.hy::test_operator_*_real",
"tests/native_tests/operators.hy::test_operator_*_shadow",
"tests/native_tests/operators.hy::test_operator_**_real",
"tests/native_tests/operators.hy::test_operator_**_shadow",
"tests/native_tests/operators.hy::test_operator_/_real",
"tests/native_tests/operators.hy::test_operator_/_shadow",
"tests/native_tests/operators.hy::test_operator_//_real",
"tests/native_tests/operators.hy::test_operator_//_shadow",
"tests/native_tests/operators.hy::test_operator_%_real",
"tests/native_tests/operators.hy::test_operator_%_shadow",
"tests/native_tests/operators.hy::test_operator_@_real",
"tests/native_tests/operators.hy::test_operator_@_shadow",
"tests/native_tests/operators.hy::test_operator_<<_real",
"tests/native_tests/operators.hy::test_operator_<<_shadow",
"tests/native_tests/operators.hy::test_operator_>>_real",
"tests/native_tests/operators.hy::test_operator_>>_shadow",
"tests/native_tests/operators.hy::test_operator_&_real",
"tests/native_tests/operators.hy::test_operator_&_shadow",
"tests/native_tests/operators.hy::test_operator_|_real",
"tests/native_tests/operators.hy::test_operator_|_shadow",
"tests/native_tests/operators.hy::test_operator_^_real",
"tests/native_tests/operators.hy::test_operator_^_shadow",
"tests/native_tests/operators.hy::test_operator_~_real",
"tests/native_tests/operators.hy::test_operator_~_shadow",
"tests/native_tests/operators.hy::test_operator_<_real",
"tests/native_tests/operators.hy::test_operator_<_shadow",
"tests/native_tests/operators.hy::test_operator_>_real",
"tests/native_tests/operators.hy::test_operator_>_shadow",
"tests/native_tests/operators.hy::test_operator_<=_real",
"tests/native_tests/operators.hy::test_operator_<=_shadow",
"tests/native_tests/operators.hy::test_operator_>=_real",
"tests/native_tests/operators.hy::test_operator_>=_shadow",
"tests/native_tests/operators.hy::test_operator_=_real",
"tests/native_tests/operators.hy::test_operator_=_shadow",
"tests/native_tests/operators.hy::test_operator_is_real",
"tests/native_tests/operators.hy::test_operator_is_shadow",
"tests/native_tests/operators.hy::test_operator_!=_real",
"tests/native_tests/operators.hy::test_operator_!=_shadow",
"tests/native_tests/operators.hy::test_operator_is_not_real",
"tests/native_tests/operators.hy::test_operator_is_not_shadow",
"tests/native_tests/operators.hy::test_operator_and_real",
"tests/native_tests/operators.hy::test_operator_and_shadow",
"tests/native_tests/operators.hy::test_operator_or_real",
"tests/native_tests/operators.hy::test_operator_or_shadow",
"tests/native_tests/operators.hy::test_operator_not_real",
"tests/native_tests/operators.hy::test_operator_not_shadow",
"tests/native_tests/operators.hy::test_operator_in_real",
"tests/native_tests/operators.hy::test_operator_in_shadow",
"tests/native_tests/operators.hy::test_operator_not_in_real",
"tests/native_tests/operators.hy::test_operator_not_in_shadow",
"tests/native_tests/py35_only_tests.hy::test_unpacking_pep448_1star",
"tests/native_tests/py35_only_tests.hy::test_unpacking_pep448_2star",
"tests/native_tests/py3_only_tests.hy::test_exception_cause",
"tests/native_tests/py3_only_tests.hy::test_kwonly",
"tests/native_tests/py3_only_tests.hy::test_extended_unpacking_1star_lvalues",
"tests/native_tests/py3_only_tests.hy::test_yield_from",
"tests/native_tests/py3_only_tests.hy::test_yield_from_exception_handling",
"tests/native_tests/tag_macros.hy::test_tag_macro",
"tests/native_tests/tag_macros.hy::test_long_tag_macro",
"tests/native_tests/tag_macros.hy::test_hyphenated_tag_macro",
"tests/native_tests/tag_macros.hy::test_tag_macro_whitespace",
"tests/native_tests/tag_macros.hy::test_tag_macro_expr",
"tests/native_tests/tag_macros.hy::test_tag_macro_override",
"tests/native_tests/tag_macros.hy::test_tag_macros_macros",
"tests/native_tests/tag_macros.hy::test_tag_macro_string_name",
"tests/native_tests/tag_macros.hy::test_builtin_decorator_tag",
"tests/test_bin.py::test_bin_hy",
"tests/test_bin.py::test_bin_hy_stdin_multiline",
"tests/test_bin.py::test_bin_hy_stdin_comments",
"tests/test_bin.py::test_bin_hy_stdin_assignment",
"tests/test_bin.py::test_bin_hy_stdin_as_arrow",
"tests/test_bin.py::test_bin_hy_stdin_error_underline_alignment",
"tests/test_bin.py::test_bin_hy_stdin_except_do",
"tests/test_bin.py::test_bin_hy_stdin_hy_repr",
"tests/test_bin.py::test_bin_hy_cmd",
"tests/test_bin.py::test_bin_hy_icmd",
"tests/test_bin.py::test_bin_hy_icmd_file",
"tests/test_bin.py::test_bin_hy_missing_file",
"tests/test_bin.py::test_bin_hy_file_with_args",
"tests/test_bin.py::test_bin_hyc",
"tests/test_bin.py::test_bin_hyc_missing_file",
"tests/test_bin.py::test_hy2py",
"tests/test_bin.py::test_bin_hy_builtins",
"tests/test_bin.py::test_bin_hy_main",
"tests/test_bin.py::test_bin_hy_main_args",
"tests/test_bin.py::test_bin_hy_main_exitvalue",
"tests/test_bin.py::test_bin_hy_no_main",
"tests/test_bin.py::test_bin_hy_byte_compile[hy",
"tests/test_bin.py::test_bin_hy_module_main",
"tests/test_bin.py::test_bin_hy_module_main_args",
"tests/test_bin.py::test_bin_hy_module_main_exitvalue",
"tests/test_bin.py::test_bin_hy_module_no_main"
]
| [
"tests/native_tests/language.hy::test_disassemble",
"tests/test_bin.py::test_bin_hy_stdin",
"tests/test_bin.py::test_bin_hy_icmd_and_spy"
]
| []
| []
| MIT License | 1,472 | [
"hy/contrib/multi.hy",
"hy/core/macros.hy",
"hy/lex/lexer.py",
"hy/compiler.py",
"NEWS",
"hy/contrib/profile.hy",
"hy/core/language.hy",
"hy/contrib/hy_repr.hy",
"hy/lex/parser.py",
"docs/tutorial.rst",
"docs/language/core.rst",
"docs/language/api.rst",
"hy/contrib/loop.hy"
]
| [
"hy/contrib/multi.hy",
"hy/core/macros.hy",
"hy/lex/lexer.py",
"hy/compiler.py",
"NEWS",
"hy/contrib/profile.hy",
"hy/core/language.hy",
"hy/contrib/hy_repr.hy",
"hy/lex/parser.py",
"docs/tutorial.rst",
"docs/language/core.rst",
"docs/language/api.rst",
"hy/contrib/loop.hy"
]
|
drdoctr__doctr-225 | adcb6b2106ee463af488a901f9aac646e23b6853 | 2017-07-16 21:04:11 | 1b6acdb6e6cc5380a2c56f1ab22b67216176e482 | diff --git a/.travis.yml b/.travis.yml
index c621f5a3..13a07f42 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -51,6 +51,8 @@ script:
# GitHub Wiki deploy
echo "This page was automatically deployed by doctr on $(date)" > deploy-test.md;
python -m doctr deploy --sync --key-path deploy_key.enc --no-require-master --deploy-repo drdoctr/doctr.wiki . --built-docs deploy-test.md;
+ # Build on tags
+ python -m doctr deploy --sync --key-path deploy_key.enc "tag-$TRAVIS_TAG" --build-tags --branch-whitelist;
fi
- if [[ "${TESTS}" == "true" ]]; then
pyflakes doctr;
diff --git a/docs/recipes.rst b/docs/recipes.rst
index a9459026..756a7c06 100644
--- a/docs/recipes.rst
+++ b/docs/recipes.rst
@@ -41,6 +41,35 @@ For security purposes, it is not possible to deploy from branches on forks
requests from forks). If you want to deploy the docs for a branch from a pull
request, you will need to push it up to the main repository.
+Deploy docs from git tags
+=========================
+
+Travis CI runs separate builds for git tags that are pushed to your repo. By
+default, doctr does not deploy on these builds, but it can be enabled with the
+``--build-tags`` flag to ``doctr deploy``. This is useful if you want to use
+doctr to deploy versioned docs for releases, for example.
+
+On Travis CI, the tag is set to the environment variable ``$TRAVIS_TAG``,
+which is empty otherwise. The following will deploy the docs to ``dev`` for
+normal ``master`` builds, and ``version-<TAG NAME>`` for tag builds:
+
+.. code:: yaml
+
+ - if [[ -z "$TRAVIS_TAG" ]]; then
+ DEPLOY_DIR=dev;
+ else
+ DEPLOY_DIR="version-$TRAVIS_TAG";
+ fi
+ - doctr deploy --build-tags --built-docs build/ $DEPLOY_DIR
+
+If you want to deploy only on a tag, use ``--branch-whitelist`` with no
+arguments to tell doctr to not deploy from any branch. For instance, to deploy
+only tags to ``latest``:
+
+.. code:: yaml
+
+ - doctr deploy latest --built-docs build/ --build-tags --branch-whitelist
+
Deploy to a separate repo
=========================
diff --git a/doctr/__main__.py b/doctr/__main__.py
index 524dacc8..befc05a2 100644
--- a/doctr/__main__.py
+++ b/doctr/__main__.py
@@ -145,6 +145,10 @@ options available.
help=argparse.SUPPRESS)
deploy_parser_add_argument('--deploy-repo', default=None, help="""Repo to
deploy the docs to. By default, it deploys to the repo Doctr is run from.""")
+ deploy_parser_add_argument('--branch-whitelist', default=None, nargs='*',
+ help="""Branches to deploy from. Pass no arguments to not build on any branch
+ (typically used in conjunction with --build-tags). Note that you can
+ deploy from every branch with --no-require-master.""", type=set, metavar="BRANCH")
deploy_parser_add_argument('--no-require-master', dest='require_master', action='store_false',
default=True, help="""Allow docs to be pushed from a branch other than master""")
deploy_parser_add_argument('--command', default=None,
@@ -163,6 +167,11 @@ options available.
deploy_parser_add_argument('--no-push', dest='push', action='store_false',
default=True, help="Run all the steps except the last push step. "
"Useful for debugging")
+ deploy_parser_add_argument('--build-tags', action='store_true',
+ default=False, help="""Deploy on tag builds. On a tag build,
+ $TRAVIS_TAG is set to the name of the tag. The default is to not
+ deploy on tag builds. Note that this will still build on a branch,
+ unless --branch-whitelist (with no arguments) is passed.""")
deploy_parser_add_argument('--gh-pages-docs', default=None,
help="""!!DEPRECATED!! Directory to deploy the html documentation to on gh-pages.
The default is %(default)r. The deploy directory should be passed as
@@ -273,13 +282,19 @@ def deploy(args, parser):
current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()
try:
- branch_whitelist = {'master'} if args.require_master else set(get_travis_branch())
+ branch_whitelist = set() if args.require_master else set(get_travis_branch())
branch_whitelist.update(set(config.get('branches',set({}))))
+ if args.branch_whitelist is not None:
+ branch_whitelist.update(args.branch_whitelist)
+ if not args.branch_whitelist:
+ branch_whitelist = {'master'}
canpush = setup_GitHub_push(deploy_repo, deploy_branch=deploy_branch,
auth_type='token' if args.token else 'deploy_key',
full_key_path=keypath,
- branch_whitelist=branch_whitelist, env_name=env_name)
+ branch_whitelist=branch_whitelist,
+ build_tags=args.build_tags,
+ env_name=env_name)
if args.sync:
built_docs = args.built_docs or find_sphinx_build_dir()
diff --git a/doctr/travis.py b/doctr/travis.py
index 557c4a9d..9682022c 100644
--- a/doctr/travis.py
+++ b/doctr/travis.py
@@ -183,8 +183,10 @@ def get_travis_branch():
else:
return os.environ.get("TRAVIS_BRANCH", "")
-def setup_GitHub_push(deploy_repo, auth_type='deploy_key', full_key_path='github_deploy_key.enc',
- require_master=None, branch_whitelist=None, deploy_branch='gh-pages', env_name='DOCTR_DEPLOY_ENCRYPTION_KEY'):
+def setup_GitHub_push(deploy_repo, *, auth_type='deploy_key',
+ full_key_path='github_deploy_key.enc', require_master=None,
+ branch_whitelist=None, deploy_branch='gh-pages',
+ env_name='DOCTR_DEPLOY_ENCRYPTION_KEY', build_tags=False):
"""
Setup the remote to push to GitHub (to be run on Travis).
@@ -196,6 +198,8 @@ def setup_GitHub_push(deploy_repo, auth_type='deploy_key', full_key_path='github
For ``auth_type='deploy_key'``, this sets up the remote with ssh access.
"""
+ # Set to the name of the tag for tag builds
+ TRAVIS_TAG = os.environ.get("TRAVIS_TAG", "")
if not branch_whitelist:
branch_whitelist={'master'}
@@ -213,8 +217,12 @@ def setup_GitHub_push(deploy_repo, auth_type='deploy_key', full_key_path='github
TRAVIS_BRANCH = os.environ.get("TRAVIS_BRANCH", "")
TRAVIS_PULL_REQUEST = os.environ.get("TRAVIS_PULL_REQUEST", "")
- canpush = determine_push_rights(branch_whitelist, TRAVIS_BRANCH,
- TRAVIS_PULL_REQUEST)
+ canpush = determine_push_rights(
+ branch_whitelist=branch_whitelist,
+ TRAVIS_BRANCH=TRAVIS_BRANCH,
+ TRAVIS_PULL_REQUEST=TRAVIS_PULL_REQUEST,
+ TRAVIS_TAG=TRAVIS_TAG,
+ build_tags=build_tags)
print("Setting git attributes")
set_git_user_email()
@@ -441,6 +449,9 @@ def commit_docs(*, added, removed):
TRAVIS_COMMIT = os.environ.get("TRAVIS_COMMIT", "<unknown>")
TRAVIS_REPO_SLUG = os.environ.get("TRAVIS_REPO_SLUG", "<unknown>")
TRAVIS_JOB_ID = os.environ.get("TRAVIS_JOB_ID", "")
+ TRAVIS_TAG = os.environ.get("TRAVIS_TAG", "")
+ branch = "tag" if TRAVIS_TAG else "branch"
+
DOCTR_COMMAND = ' '.join(map(shlex.quote, sys.argv))
for f in added:
@@ -452,7 +463,7 @@ def commit_docs(*, added, removed):
Update docs after building Travis build {TRAVIS_BUILD_NUMBER} of
{TRAVIS_REPO_SLUG}
-The docs were built from the branch '{TRAVIS_BRANCH}' against the commit
+The docs were built from the {branch} '{TRAVIS_BRANCH}' against the commit
{TRAVIS_COMMIT}.
The Travis build that generated this commit is at
@@ -462,6 +473,7 @@ The doctr command that was run is
{DOCTR_COMMAND}
""".format(
+ branch=branch,
TRAVIS_BUILD_NUMBER=TRAVIS_BUILD_NUMBER,
TRAVIS_BRANCH=TRAVIS_BRANCH,
TRAVIS_COMMIT=TRAVIS_COMMIT,
@@ -504,12 +516,18 @@ def push_docs(deploy_branch='gh-pages', retries=3):
return
sys.exit("Giving up...")
-def determine_push_rights(branch_whitelist, TRAVIS_BRANCH, TRAVIS_PULL_REQUEST):
+def determine_push_rights(*, branch_whitelist, TRAVIS_BRANCH,
+ TRAVIS_PULL_REQUEST, TRAVIS_TAG, build_tags):
"""Check if Travis is running on ``master`` (or a whitelisted branch) to
determine if we can/should push the docs to the deploy repo
"""
canpush = True
+ if TRAVIS_TAG:
+ if not build_tags:
+ print("The docs are not pushed on tag builds. To push on future tag builds, use --build-tags")
+ return build_tags
+
if not any([re.compile(x).match(TRAVIS_BRANCH) for x in branch_whitelist]):
print("The docs are only pushed to gh-pages from master. To allow pushing from "
"a non-master branch, use the --no-require-master flag", file=sys.stderr)
| Ability to do something different on a tag
This tool is mainly designed for uploading dev docs, but it can also be used to upload release docs. In that case, we might need some way to do something different on a tag.
Or maybe the logic for uploading on a tag should just go in the individual .travis.yml files, in which case we can at least document what it could look like.
| drdoctr/doctr | diff --git a/doctr/tests/test_travis.py b/doctr/tests/test_travis.py
index 6a4c3bbe..136ddeaf 100644
--- a/doctr/tests/test_travis.py
+++ b/doctr/tests/test_travis.py
@@ -144,16 +144,51 @@ def test_sync_from_log(src, dst):
os.chdir(old_curdir)
[email protected]("travis_branch, travis_pr, whitelist, canpush",
- [('doctr', 'true', 'master', False),
- ('doctr', 'false', 'master', False),
- ('master', 'true', 'master', False),
- ('master', 'false', 'master', True),
- ('doctr', 'True', 'doctr', False),
- ('doctr', 'false', 'doctr', True),
- ('doctr', 'false', 'set()', False),
- ])
-def test_determine_push_rights(travis_branch, travis_pr, whitelist, canpush, monkeypatch):
- branch_whitelist = {whitelist}
[email protected]("""branch_whitelist, TRAVIS_BRANCH,
+ TRAVIS_PULL_REQUEST, TRAVIS_TAG, build_tags,
+ canpush""",
+ [
+
+ ('master', 'doctr', 'true', "", False, False),
+ ('master', 'doctr', 'false', "", False, False),
+ ('master', 'master', 'true', "", False, False),
+ ('master', 'master', 'false', "", False, True),
+ ('doctr', 'doctr', 'True', "", False, False),
+ ('doctr', 'doctr', 'false', "", False, True),
+ ('set()', 'doctr', 'false', "", False, False),
+
+ ('master', 'doctr', 'true', "tagname", False, False),
+ ('master', 'doctr', 'false', "tagname", False, False),
+ ('master', 'master', 'true', "tagname", False, False),
+ ('master', 'master', 'false', "tagname", False, False),
+ ('doctr', 'doctr', 'True', "tagname", False, False),
+ ('doctr', 'doctr', 'false', "tagname", False, False),
+ ('set()', 'doctr', 'false', "tagname", False, False),
+
+ ('master', 'doctr', 'true', "", True, False),
+ ('master', 'doctr', 'false', "", True, False),
+ ('master', 'master', 'true', "", True, False),
+ ('master', 'master', 'false', "", True, True),
+ ('doctr', 'doctr', 'True', "", True, False),
+ ('doctr', 'doctr', 'false', "", True, True),
+ ('set()', 'doctr', 'false', "", True, False),
+
+ ('master', 'doctr', 'true', "tagname", True, True),
+ ('master', 'doctr', 'false', "tagname", True, True),
+ ('master', 'master', 'true', "tagname", True, True),
+ ('master', 'master', 'false', "tagname", True, True),
+ ('doctr', 'doctr', 'True', "tagname", True, True),
+ ('doctr', 'doctr', 'false', "tagname", True, True),
+ ('set()', 'doctr', 'false', "tagname", True, True),
- assert determine_push_rights(branch_whitelist, travis_branch, travis_pr) == canpush
+ ])
+def test_determine_push_rights(branch_whitelist, TRAVIS_BRANCH,
+ TRAVIS_PULL_REQUEST, TRAVIS_TAG, build_tags, canpush, monkeypatch):
+ branch_whitelist = {branch_whitelist}
+
+ assert determine_push_rights(
+ branch_whitelist=branch_whitelist,
+ TRAVIS_BRANCH=TRAVIS_BRANCH,
+ TRAVIS_PULL_REQUEST=TRAVIS_PULL_REQUEST,
+ TRAVIS_TAG=TRAVIS_TAG,
+ build_tags=build_tags) == canpush
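To make the new keyword-only signature concrete, here is a small illustrative sketch (not part of the PR or its test suite) that exercises `determine_push_rights` the same way two of the parametrized cases above do; the tag name is a placeholder value.

```python
from doctr.travis import determine_push_rights

# Tag build with --build-tags: doctr pushes even from a non-master branch / PR context.
assert determine_push_rights(
    branch_whitelist={'master'},
    TRAVIS_BRANCH='doctr',
    TRAVIS_PULL_REQUEST='false',
    TRAVIS_TAG='v1.2.3',   # placeholder tag name
    build_tags=True,
)

# Tag build without --build-tags: doctr declines to push, even on master.
assert not determine_push_rights(
    branch_whitelist={'master'},
    TRAVIS_BRANCH='master',
    TRAVIS_PULL_REQUEST='false',
    TRAVIS_TAG='v1.2.3',
    build_tags=False,
)
```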
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 4
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
-e git+https://github.com/drdoctr/doctr.git@adcb6b2106ee463af488a901f9aac646e23b6853#egg=doctr
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: doctr
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/doctr
| [
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-true--False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-false--False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-true--False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-false--False-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-True--False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-false--False-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[set()-doctr-false--False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-true-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-false-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-true-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-false-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-True-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-false-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[set()-doctr-false-tagname-False-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-true--True-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-false--True-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-true--True-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-false--True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-True--True-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-false--True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[set()-doctr-false--True-False]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-true-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-doctr-false-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-true-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[master-master-false-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-True-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[doctr-doctr-false-tagname-True-True]",
"doctr/tests/test_travis.py::test_determine_push_rights[set()-doctr-false-tagname-True-True]"
]
| []
| [
"doctr/tests/test_travis.py::test_sync_from_log[.-src]",
"doctr/tests/test_travis.py::test_sync_from_log[dst-src]"
]
| []
| MIT License | 1,473 | [
"docs/recipes.rst",
".travis.yml",
"doctr/__main__.py",
"doctr/travis.py"
]
| [
"docs/recipes.rst",
".travis.yml",
"doctr/__main__.py",
"doctr/travis.py"
]
|
|
asottile__add-trailing-comma-20 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | 2017-07-16 21:34:30 | e6cfc6a9976fc305b0054b30995b5407fea833a5 | asottile: This also removes the early-pruning of things that the ast tells us aren't multiline -- this potentially increases the runtime as it'll now run on more things -- but it makes everything more consistent and should ensure everything resolves in a single pass. | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index d8f1939..9ef7e2f 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -64,16 +64,12 @@ class FindNodes(ast.NodeVisitor):
self.literals = {}
self.has_new_syntax = False
- def _visit_literal(self, node, key='elts', is_multiline=False, **kwargs):
- orig = node.lineno
-
+ def _visit_literal(self, node, key='elts', **kwargs):
for elt in getattr(node, key):
- if elt.lineno > orig:
- is_multiline = True
if _is_star_arg(elt): # pragma: no cover (PY35+)
self.has_new_syntax = True
- if is_multiline:
+ if getattr(node, key):
key = Offset(node.lineno, node.col_offset)
self.literals[key] = Literal(node, **kwargs)
self.generic_visit(node)
@@ -87,13 +83,10 @@ class FindNodes(ast.NodeVisitor):
self._visit_literal(node, key='values')
def visit_Tuple(self, node):
- # tuples lie about things, so we pretend they are all multiline
- # and tell the later machinery to backtrack
- self._visit_literal(node, is_multiline=True, backtrack=True)
+ # tuples lie about things so we tell the later machiner to backtrack
+ self._visit_literal(node, backtrack=True)
def visit_Call(self, node):
- orig = node.lineno
-
argnodes = node.args + node.keywords
py2_starargs = getattr(node, 'starargs', None)
if py2_starargs: # pragma: no cover (<PY35)
@@ -103,7 +96,6 @@ class FindNodes(ast.NodeVisitor):
argnodes.append(py2_kwargs)
arg_offsets = set()
- is_multiline = False
has_starargs = bool(py2_starargs or py2_kwargs)
for argnode in argnodes:
if (
@@ -115,8 +107,6 @@ class FindNodes(ast.NodeVisitor):
offset = _to_offset(argnode)
# multiline strings have invalid position, ignore them
if offset.utf8_byte_offset != -1: # pragma: no branch (cpy bug)
- if offset.line > orig:
- is_multiline = True
arg_offsets.add(offset)
# If the sole argument is a generator, don't add a trailing comma as
@@ -125,7 +115,7 @@ class FindNodes(ast.NodeVisitor):
len(argnodes) == 1 and isinstance(argnodes[0], ast.GeneratorExp)
)
- if is_multiline and not only_a_generator:
+ if arg_offsets and not only_a_generator:
key = Offset(node.lineno, node.col_offset)
self.calls[key] = Call(node, has_starargs, arg_offsets)
@@ -144,16 +134,12 @@ class FindNodes(ast.NodeVisitor):
getattr(node.args, 'kwonlyargs', None)
)
- orig = node.lineno
- is_multiline = False
offsets = set()
for argnode in node.args.args:
offset = _to_offset(argnode)
- if offset.line > orig:
- is_multiline = True
offsets.add(offset)
- if is_multiline and not has_starargs:
+ if offsets and not has_starargs:
key = Offset(node.lineno, node.col_offset)
self.funcs[key] = Func(node, offsets)
@@ -181,7 +167,7 @@ def _find_simple(first_brace, tokens):
last_brace = i
- # This was not actually a multi-line call, despite the ast telling us that
+ # Check if we're actually multi-line
if tokens[first_brace].line == tokens[last_brace].line:
return
| Two iterations are required to resolve func(multi line string literal)
### input
```python
f('long'
'literal')
```
### output 1
```python
f(
'long'
'literal'
)
```
### output 2
```python
f(
'long'
'literal',
)
```
This _should_ resolve in a single pass | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 01ee421..450e3a0 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -257,6 +257,7 @@ def test_noop_tuple_literal_without_braces():
@pytest.mark.parametrize(
'src',
(
+ 'def f(): pass',
'def f(arg1, arg2): pass',
'def f(\n'
' arg1,\n'
@@ -354,6 +355,22 @@ def test_noop_unhugs(src):
' c,\n'
')',
),
+ (
+ 'def f(\n'
+ ' *args): pass',
+
+ 'def f(\n'
+ ' *args\n'
+ '): pass',
+ ),
+ (
+ 'def f(\n'
+ ' **kwargs): pass',
+
+ 'def f(\n'
+ ' **kwargs\n'
+ '): pass',
+ ),
# if there's already a trailing comma, don't add a new one
(
'f(\n'
@@ -493,6 +510,16 @@ def test_noop_unhugs(src):
' ),\n'
')',
),
+ # Regression test for #16
+ (
+ 'x("foo"\n'
+ ' "bar")',
+
+ 'x(\n'
+ ' "foo"\n'
+ ' "bar",\n'
+ ')',
+ ),
),
)
def test_fix_unhugs(src, expected):
@@ -503,23 +530,6 @@ def test_fix_unhugs(src, expected):
@pytest.mark.parametrize(
('src', 'expected'),
(
- # python 2 doesn't give offset information for starargs
- (
- 'def f(\n'
- ' *args): pass',
-
- 'def f(\n'
- ' *args\n'
- '): pass',
- ),
- (
- 'def f(\n'
- ' **kwargs): pass',
-
- 'def f(\n'
- ' **kwargs\n'
- '): pass',
- ),
# python 2 doesn't kwonlyargs
(
'def f(\n'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@e6cfc6a9976fc305b0054b30995b5407fea833a5#egg=add_trailing_comma
cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
identify==2.6.9
iniconfig==2.1.0
mccabe==0.7.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
PyYAML==6.0.2
tokenize_rt==6.1.0
tomli==2.2.1
virtualenv==20.29.3
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- identify==2.6.9
- iniconfig==2.1.0
- mccabe==0.7.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- pyyaml==6.0.2
- tokenize-rt==6.1.0
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[(1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[[1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[{1:",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(*args1,",
"tests/add_trailing_comma_test.py::test_auto_detected_py35_plus_rewrite[y(**kwargs1,",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,474 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
oasis-open__cti-python-stix2-34 | 6f680be8a65028c303bae38bbe1fa0a2d08852a8 | 2017-07-17 13:24:16 | 58f39f80af5cbfe02879c2efa4b3b4ef7a504390 | diff --git a/stix2/__init__.py b/stix2/__init__.py
index 904af9c..90bcf59 100644
--- a/stix2/__init__.py
+++ b/stix2/__init__.py
@@ -4,6 +4,7 @@
from . import exceptions
from .bundle import Bundle
+from .environment import ObjectFactory
from .observables import (URL, AlternateDataStream, ArchiveExt, Artifact,
AutonomousSystem, Directory, DomainName,
EmailAddress, EmailMessage, EmailMIMEComponent, File,
diff --git a/stix2/environment.py b/stix2/environment.py
new file mode 100644
index 0000000..f855755
--- /dev/null
+++ b/stix2/environment.py
@@ -0,0 +1,68 @@
+import copy
+
+
+class ObjectFactory(object):
+ """Object Factory
+
+ Used to easily create STIX objects with default values for certain
+ properties.
+
+ Args:
+ created_by_ref: Default created_by_ref value to apply to all
+ objects created by this factory.
+ created: Default created value to apply to all
+ objects created by this factory.
+ external_references: Default `external_references` value to apply
+ to all objects created by this factory.
+ object_marking_refs: Default `object_marking_refs` value to apply
+ to all objects created by this factory.
+ list_append: When a default is set for a list property like
+ `external_references` or `object_marking_refs` and a value for
+ that property is passed into `create()`, if this is set to True,
+ that value will be added to the list alongside the default. If
+ this is set to False, the passed in value will replace the
+ default. Defaults to True.
+ """
+
+ def __init__(self, created_by_ref=None, created=None,
+ external_references=None, object_marking_refs=None,
+ list_append=True):
+
+ self._defaults = {}
+ if created_by_ref:
+ self._defaults['created_by_ref'] = created_by_ref
+ if created:
+ self._defaults['created'] = created
+ # If the user provides a default "created" time, we also want to use
+ # that as the modified time.
+ self._defaults['modified'] = created
+ if external_references:
+ self._defaults['external_references'] = external_references
+ if object_marking_refs:
+ self._defaults['object_marking_refs'] = object_marking_refs
+ self._list_append = list_append
+ self._list_properties = ['external_references', 'object_marking_refs']
+
+ def create(self, cls, **kwargs):
+ # Use self.defaults as the base, but update with any explicit args
+ # provided by the user.
+ properties = copy.deepcopy(self._defaults)
+ if kwargs:
+ if self._list_append:
+ # Append provided items to list properties instead of replacing them
+ for list_prop in set(self._list_properties).intersection(kwargs.keys(), properties.keys()):
+ kwarg_prop = kwargs.pop(list_prop)
+ if kwarg_prop is None:
+ del properties[list_prop]
+ continue
+ if not isinstance(properties[list_prop], list):
+ properties[list_prop] = [properties[list_prop]]
+
+ if isinstance(kwarg_prop, list):
+ properties[list_prop].extend(kwarg_prop)
+ else:
+ properties[list_prop].append(kwarg_prop)
+
+ properties.update(**kwargs)
+
+ return cls(**properties)
diff --git a/stix2/other.py b/stix2/other.py
index 51663b3..cd75745 100644
--- a/stix2/other.py
+++ b/stix2/other.py
@@ -69,7 +69,7 @@ class MarkingProperty(Property):
class MarkingDefinition(_STIXBase):
_type = 'marking-definition'
_properties = {
- 'created': TimestampProperty(default=lambda: NOW, required=True),
+ 'created': TimestampProperty(default=lambda: NOW),
'external_references': ListProperty(ExternalReference),
'created_by_ref': ReferenceProperty(type="identity"),
'object_marking_refs': ListProperty(ReferenceProperty(type="marking-definition")),
diff --git a/stix2/properties.py b/stix2/properties.py
index 80e5345..71e4bd9 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -5,7 +5,7 @@ import inspect
import re
import uuid
-from six import text_type
+from six import string_types, text_type
from .base import _Observable, _STIXBase
from .exceptions import DictionaryKeyError
@@ -101,12 +101,9 @@ class ListProperty(Property):
iter(value)
except TypeError:
raise ValueError("must be an iterable.")
- try:
- if isinstance(value, basestring):
- value = [value]
- except NameError:
- if isinstance(value, str):
- value = [value]
+
+ if isinstance(value, (_STIXBase, string_types)):
+ value = [value]
result = []
for item in value:
| Create an Object Factory class
This would let users specify defaults for some common properties and easily create STIX objects without needing to specify them every single time; the objects would automatically be created with the default value.
For example, you could specify a default `created_by_ref` value to point to the Identity SDO for your organization. | oasis-open/cti-python-stix2 | diff --git a/stix2/test/constants.py b/stix2/test/constants.py
index b631d08..958120b 100644
--- a/stix2/test/constants.py
+++ b/stix2/test/constants.py
@@ -20,6 +20,12 @@ TOOL_ID = "tool--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f"
SIGHTING_ID = "sighting--bfbc19db-ec35-4e45-beed-f8bde2a772fb"
VULNERABILITY_ID = "vulnerability--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061"
+# Minimum required args for an Identity instance
+IDENTITY_KWARGS = dict(
+ name="John Smith",
+ identity_class="individual",
+)
+
# Minimum required args for an Indicator instance
INDICATOR_KWARGS = dict(
labels=['malicious-activity'],
diff --git a/stix2/test/test_environment.py b/stix2/test/test_environment.py
new file mode 100644
index 0000000..9be8101
--- /dev/null
+++ b/stix2/test/test_environment.py
@@ -0,0 +1,83 @@
+import stix2
+
+from .constants import (FAKE_TIME, IDENTITY_ID, IDENTITY_KWARGS,
+ INDICATOR_KWARGS)
+
+
+def test_object_factory_created_by_ref_str():
+ factory = stix2.ObjectFactory(created_by_ref=IDENTITY_ID)
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert ind.created_by_ref == IDENTITY_ID
+
+
+def test_object_factory_created_by_ref_obj():
+ id_obj = stix2.Identity(id=IDENTITY_ID, **IDENTITY_KWARGS)
+ factory = stix2.ObjectFactory(created_by_ref=id_obj)
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert ind.created_by_ref == IDENTITY_ID
+
+
+def test_object_factory_override_default():
+ factory = stix2.ObjectFactory(created_by_ref=IDENTITY_ID)
+ new_id = "identity--983b3172-44fe-4a80-8091-eb8098841fe8"
+ ind = factory.create(stix2.Indicator, created_by_ref=new_id, **INDICATOR_KWARGS)
+ assert ind.created_by_ref == new_id
+
+
+def test_object_factory_created():
+ factory = stix2.ObjectFactory(created=FAKE_TIME)
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert ind.created == FAKE_TIME
+ assert ind.modified == FAKE_TIME
+
+
+def test_object_factory_external_resource():
+ ext_ref = stix2.ExternalReference(source_name="ACME Threat Intel",
+ description="Threat report")
+ factory = stix2.ObjectFactory(external_references=ext_ref)
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert ind.external_references[0].source_name == "ACME Threat Intel"
+ assert ind.external_references[0].description == "Threat report"
+
+ ind2 = factory.create(stix2.Indicator, external_references=None, **INDICATOR_KWARGS)
+ assert 'external_references' not in ind2
+
+
+def test_object_factory_obj_markings():
+ stmt_marking = stix2.StatementMarking("Copyright 2016, Example Corp")
+ mark_def = stix2.MarkingDefinition(definition_type="statement",
+ definition=stmt_marking)
+ factory = stix2.ObjectFactory(object_marking_refs=[mark_def, stix2.TLP_AMBER])
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert mark_def.id in ind.object_marking_refs
+ assert stix2.TLP_AMBER.id in ind.object_marking_refs
+
+ factory = stix2.ObjectFactory(object_marking_refs=stix2.TLP_RED)
+ ind = factory.create(stix2.Indicator, **INDICATOR_KWARGS)
+ assert stix2.TLP_RED.id in ind.object_marking_refs
+
+
+def test_object_factory_list_append():
+ ext_ref = stix2.ExternalReference(source_name="ACME Threat Intel",
+ description="Threat report from ACME")
+ ext_ref2 = stix2.ExternalReference(source_name="Yet Another Threat Report",
+ description="Threat report from YATR")
+ ext_ref3 = stix2.ExternalReference(source_name="Threat Report #3",
+ description="One more threat report")
+ factory = stix2.ObjectFactory(external_references=ext_ref)
+ ind = factory.create(stix2.Indicator, external_references=ext_ref2, **INDICATOR_KWARGS)
+ assert ind.external_references[1].source_name == "Yet Another Threat Report"
+
+ ind = factory.create(stix2.Indicator, external_references=[ext_ref2, ext_ref3], **INDICATOR_KWARGS)
+ assert ind.external_references[2].source_name == "Threat Report #3"
+
+
+def test_object_factory_list_replace():
+ ext_ref = stix2.ExternalReference(source_name="ACME Threat Intel",
+ description="Threat report from ACME")
+ ext_ref2 = stix2.ExternalReference(source_name="Yet Another Threat Report",
+ description="Threat report from YATR")
+ factory = stix2.ObjectFactory(external_references=ext_ref, list_append=False)
+ ind = factory.create(stix2.Indicator, external_references=ext_ref2, **INDICATOR_KWARGS)
+ assert len(ind.external_references) == 1
+ assert ind.external_references[0].source_name == "Yet Another Threat Report"
diff --git a/stix2/test/test_markings.py b/stix2/test/test_markings.py
index ebfa480..70d67dd 100644
--- a/stix2/test/test_markings.py
+++ b/stix2/test/test_markings.py
@@ -29,6 +29,19 @@ EXPECTED_STATEMENT_MARKING_DEFINITION = """{
"type": "marking-definition"
}"""
+EXPECTED_CAMPAIGN_WITH_OBJECT_MARKING = """{
+ "created": "2016-04-06T20:03:00.000Z",
+ "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ "description": "Campaign by Green Group against a series of targets in the financial services sector.",
+ "id": "campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ "modified": "2016-04-06T20:03:00.000Z",
+ "name": "Green Group Attacks Against Finance",
+ "object_marking_refs": [
+ "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
+ ],
+ "type": "campaign"
+}"""
+
EXPECTED_GRANULAR_MARKING = """{
"marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9",
"selectors": [
@@ -84,6 +97,29 @@ def test_marking_def_example_with_positional_statement():
assert str(marking_definition) == EXPECTED_STATEMENT_MARKING_DEFINITION
+def test_marking_def_invalid_type():
+ with pytest.raises(ValueError):
+ stix2.MarkingDefinition(
+ id="marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9",
+ created="2017-01-20T00:00:00.000Z",
+ definition_type="my-definiition-type",
+ definition=stix2.StatementMarking("Copyright 2016, Example Corp")
+ )
+
+
+def test_campaign_with_markings_example():
+ campaign = stix2.Campaign(
+ id="campaign--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
+ created_by_ref="identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
+ created="2016-04-06T20:03:00Z",
+ modified="2016-04-06T20:03:00Z",
+ name="Green Group Attacks Against Finance",
+ description="Campaign by Green Group against a series of targets in the financial services sector.",
+ object_marking_refs=TLP_WHITE
+ )
+ assert str(campaign) == EXPECTED_CAMPAIGN_WITH_OBJECT_MARKING
+
+
def test_granular_example():
granular_marking = stix2.GranularMarking(
marking_ref="marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9",
@@ -119,7 +155,6 @@ def test_campaign_with_granular_markings_example():
marking_ref="marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9",
selectors=["description"])
])
- print(str(campaign))
assert str(campaign) == EXPECTED_CAMPAIGN_WITH_GRANULAR_MARKINGS
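As a quick orientation for this instance, the following is a minimal usage sketch (not part of the original PR or its tests) of the `ObjectFactory` added in `stix2/environment.py`; the identity ID and indicator pattern below are placeholder values.

```python
import stix2

# Factory that stamps a default created_by_ref onto every object it creates.
factory = stix2.ObjectFactory(
    created_by_ref="identity--311b2d2d-f010-4473-83ec-1edf84858f4c",  # placeholder ID
)

# Keyword arguments passed to create() are merged with, and can override, the defaults.
indicator = factory.create(
    stix2.Indicator,
    labels=["malicious-activity"],
    pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",  # placeholder pattern
)

assert indicator.created_by_ref == "identity--311b2d2d-f010-4473-83ec-1edf84858f4c"
```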
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest-cov",
"coverage",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
Jinja2==3.0.3
MarkupSafe==2.0.1
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
-e git+https://github.com/oasis-open/cti-python-stix2.git@6f680be8a65028c303bae38bbe1fa0a2d08852a8#egg=stix2
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- jinja2==3.0.3
- markupsafe==2.0.1
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_environment.py::test_object_factory_created_by_ref_str",
"stix2/test/test_environment.py::test_object_factory_created_by_ref_obj",
"stix2/test/test_environment.py::test_object_factory_override_default",
"stix2/test/test_environment.py::test_object_factory_created",
"stix2/test/test_environment.py::test_object_factory_external_resource",
"stix2/test/test_environment.py::test_object_factory_obj_markings",
"stix2/test/test_environment.py::test_object_factory_list_append",
"stix2/test/test_environment.py::test_object_factory_list_replace",
"stix2/test/test_markings.py::test_campaign_with_markings_example"
]
| []
| [
"stix2/test/test_markings.py::test_marking_def_example_with_tlp",
"stix2/test/test_markings.py::test_marking_def_example_with_statement",
"stix2/test/test_markings.py::test_marking_def_example_with_positional_statement",
"stix2/test/test_markings.py::test_marking_def_invalid_type",
"stix2/test/test_markings.py::test_granular_example",
"stix2/test/test_markings.py::test_granular_example_with_bad_selector",
"stix2/test/test_markings.py::test_campaign_with_granular_markings_example",
"stix2/test/test_markings.py::test_parse_marking_definition[{\\n",
"stix2/test/test_markings.py::test_parse_marking_definition[data1]"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,475 | [
"stix2/environment.py",
"stix2/other.py",
"stix2/properties.py",
"stix2/__init__.py"
]
| [
"stix2/environment.py",
"stix2/other.py",
"stix2/properties.py",
"stix2/__init__.py"
]
|
|
getlogbook__logbook-242 | 7d97a72a848c176108845eda44503a688e0e5042 | 2017-07-17 15:54:05 | 7d97a72a848c176108845eda44503a688e0e5042 | coveralls:
[](https://coveralls.io/builds/12424876)
Coverage decreased (-1.09%) to 72.927% when pulling **ae5be3aa326f82ffbd0d7f6ff9f6c613b1f06d0d on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
coveralls:
[](https://coveralls.io/builds/12425185)
Coverage increased (+0.1%) to 74.131% when pulling **8ecd7c280037cc8193384249e12354fd4d01cf0e on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
coveralls:
[](https://coveralls.io/builds/12465211)
Coverage increased (+0.1%) to 74.158% when pulling **6be7a6f0f3f552ecdcb10759ca430c1f4f12626d on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
coveralls:
[](https://coveralls.io/builds/12466492)
Coverage increased (+0.1%) to 74.14% when pulling **4185f20938065922ee1ef5e3b6e47839bf2cb5d5 on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
jikamens: Sorry about all the crap commits. I've squashed them into the actual functional commits and repushed.
coveralls:
[](https://coveralls.io/builds/12466850)
Coverage increased (+0.1%) to 74.131% when pulling **169fd69409ab5bf8890dba7b25a643612b6f4d74 on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
coveralls:
[](https://coveralls.io/builds/12466973)
Coverage increased (+0.1%) to 74.131% when pulling **8a8f9456946a3d942935b5455bb9dbec2c001b6f on quantopian:master** into **fec1913ddc87841da648018deebdbcbcf0ed9a3f on getlogbook:master**.
jikquantopian: Ping?
vmalloc: Sorry - was a bit busy lately so didn't get around to it. I commented in two places for clarifications.
Thanks!
vmalloc: It also seems the Travis tests are failing... have any idea why?
jikamens: > It also seems the Travis tests are failing... have any idea why?
I didn't want to spend time working on the unit tests until I knew y'all were comfortable with these changes conceptually. Let me know if you are, and I'll dig into why the tests are failing and whether fixes are needed to the tests or to my proposed changes.
vmalloc: @jikamens I think I'm fine with the changes as long as they don't break existing (reasonable) usage, and as long as they make sense logically...
Having said that, once we merge and release it, we could start seeing reports from people already using Logbook who run into problems with the change. Will you be available to aid in fixes? I have little time to actively commit to this project nowadays...
jikamens: OK, all issues you mentioned are addressed, I added a couple bonus fixes, I rebased against current getlogbook/master, and Travis is passing. And yes, I'll be around to help if there are bug reports or complaints. Please review, and if everything looks good I'll squash and rebase one more time before merge. | diff --git a/CHANGES b/CHANGES
index 7530095..1eb202f 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,12 @@
Logbook Changelog
=================
+Not yet released
+
+- Use correct record delimiters (null for UNIX, newline for network) in SyslogHandler (thanks Jonathan Kamens)
+- Try to reconnect to SyslogHandler TCP sockets when they are disconnected (thanks Jonathan Kamens)
+- Use RFC 5424 format for networking logging in SyslogHandler (thanks Jonathan Kamens)
+
Here you can see the full list of changes between each Logbook release.
Version 1.4.1
diff --git a/logbook/handlers.py b/logbook/handlers.py
index 3533445..72d7ebf 100644
--- a/logbook/handlers.py
+++ b/logbook/handlers.py
@@ -1535,7 +1535,7 @@ class SyslogHandler(Handler, StringFormatterHandlerMixin):
def __init__(self, application_name=None, address=None,
facility='user', socktype=socket.SOCK_DGRAM,
level=NOTSET, format_string=None, filter=None,
- bubble=False):
+ bubble=False, record_delimiter=None):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.application_name = application_name
@@ -1546,14 +1546,24 @@ class SyslogHandler(Handler, StringFormatterHandlerMixin):
else:
address = '/dev/log'
- self.address = address
+ self.remote_address = self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, string_types):
self._connect_unixsocket()
+ self.enveloper = self.unix_envelope
+ default_delimiter = u'\x00'
else:
self._connect_netsocket()
+ self.enveloper = self.net_envelope
+ default_delimiter = u'\n'
+
+ self.record_delimiter = default_delimiter \
+ if record_delimiter is None else record_delimiter
+
+ self.connection_exception = getattr(
+ __builtins__, 'BrokenPipeError', socket.error)
def _connect_unixsocket(self):
self.unixsocket = True
@@ -1569,7 +1579,7 @@ class SyslogHandler(Handler, StringFormatterHandlerMixin):
self.unixsocket = False
self.socket = socket.socket(socket.AF_INET, self.socktype)
if self.socktype == socket.SOCK_STREAM:
- self.socket.connect(self.address)
+ self.socket.connect(self.remote_address)
self.address = self.socket.getsockname()
def encode_priority(self, record):
@@ -1578,15 +1588,44 @@ class SyslogHandler(Handler, StringFormatterHandlerMixin):
self.LOG_WARNING)
return (facility << 3) | priority
- def emit(self, record):
- prefix = u('')
- if self.application_name is not None:
- prefix = self.application_name + u(':')
- self.send_to_socket((u('<%d>%s%s\x00') % (
+ def wrap_segments(self, record, before):
+ msg = self.format(record)
+ segments = [segment for segment in msg.split(self.record_delimiter)]
+ return (before + segment + self.record_delimiter
+ for segment in segments)
+
+ def unix_envelope(self, record):
+ before = u'<{}>{}'.format(
self.encode_priority(record),
- prefix,
- self.format(record)
- )).encode('utf-8'))
+ self.application_name + ':' if self.application_name else '')
+ return self.wrap_segments(record, before)
+
+ def net_envelope(self, record):
+ # Gross but effective
+ try:
+ format_string = self.format_string
+ application_name = self.application_name
+ if not application_name and record.channel and \
+ '{record.channel}: ' in format_string:
+ self.format_string = format_string.replace(
+ '{record.channel}: ', '')
+ self.application_name = record.channel
+ # RFC 5424: <PRIVAL>version timestamp hostname app-name procid
+ # msgid structured-data message
+ before = u'<{}>1 {}Z {} {} {} - - '.format(
+ self.encode_priority(record),
+ record.time.isoformat(),
+ socket.gethostname(),
+ self.application_name if self.application_name else '-',
+ record.process)
+ return self.wrap_segments(record, before)
+ finally:
+ self.format_string = format_string
+ self.application_name = application_name
+
+ def emit(self, record):
+ for segment in self.enveloper(record):
+ self.send_to_socket(segment.encode('utf-8'))
def send_to_socket(self, data):
if self.unixsocket:
@@ -1599,7 +1638,11 @@ class SyslogHandler(Handler, StringFormatterHandlerMixin):
# the flags are no longer optional on Python 3
self.socket.sendto(data, 0, self.address)
else:
- self.socket.sendall(data)
+ try:
+ self.socket.sendall(data)
+ except self.connection_exception:
+ self._connect_netsocket()
+ self.socket.send(data)
def close(self):
self.socket.close()
| Should use newline character, not null, for TCP record terminator
As per https://github.com/getlogbook/logbook/blob/master/logbook/handlers.py#L1430, Logbook marks the end of SyslogHandler records with a null character.
I don't know whether this is correct when sending to a socket, but as far as I can tell, the correct record terminator when sending over TCP is newline, not null, and no terminator should be included at all when sending over UDP.
Because it's using a null character instead of a newline, rsyslogd doesn't properly interpret messages sent to it by Logbook via TCP.
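A minimal usage sketch of the behaviour after the patch above (the daemon address, application name, and logger name are placeholders; it assumes a syslog daemon is listening on 127.0.0.1:514): with this change the handler derives the record delimiter from the transport, NUL for a UNIX socket path and newline for a network address, and the new `record_delimiter` argument can override either default.
```python
import socket
import logbook

# Assumption: a syslog daemon is listening on 127.0.0.1:514.
# After the patch, records sent over a network socket are newline-terminated
# (overridable via record_delimiter), while a UNIX socket path keeps the
# historical NUL terminator.
handler = logbook.SyslogHandler(
    'myapp', ('127.0.0.1', 514), socktype=socket.SOCK_STREAM)

with handler.applicationbound():
    logbook.Logger('demo').warn('delivered as a newline-terminated record')
```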
| getlogbook/logbook | diff --git a/tests/test_logging_compat.py b/tests/test_logging_compat.py
index 31fdd40..7964993 100644
--- a/tests/test_logging_compat.py
+++ b/tests/test_logging_compat.py
@@ -33,7 +33,7 @@ def test_basic_compat(request, set_root_logger_level):
with redirected_logging(set_root_logger_level):
logger.debug('This is from the old system')
logger.info('This is from the old system')
- logger.warn('This is from the old %s', 'system')
+ logger.warning('This is from the old %s', 'system')
logger.error('This is from the old system')
logger.critical('This is from the old system')
logger.error('This is a %(what)s %(where)s', {'what': 'mapping', 'where': 'test'})
diff --git a/tests/test_mail_handler.py b/tests/test_mail_handler.py
index fd7730b..718d936 100644
--- a/tests/test_mail_handler.py
+++ b/tests/test_mail_handler.py
@@ -40,11 +40,11 @@ def test_mail_handler(activation_strategy, logger):
header, data = mail.split('\n\n', 1)
if 'Content-Transfer-Encoding: base64' in header:
data = base64.b64decode(data).decode('utf-8')
- assert re.search('Message type:\s+ERROR', data)
- assert re.search('Location:.*%s' %
+ assert re.search(r'Message type:\s+ERROR', data)
+ assert re.search(r'Location:.*%s' %
re.escape(__file_without_pyc__), data)
- assert re.search('Module:\s+%s' % __name__, data)
- assert re.search('Function:\s+test_mail_handler', data)
+ assert re.search(r'Module:\s+%s' % __name__, data)
+ assert re.search(r'Function:\s+test_mail_handler', data)
body = u('Viva la Espa\xf1a')
if sys.version_info < (3, 0):
body = body.encode('utf-8')
@@ -72,14 +72,14 @@ def test_mail_handler_batching(activation_strategy, logger):
body, rest = pieces
rest = rest.replace('\r', '')
- assert re.search('Message type:\s+ERROR', body)
- assert re.search('Module:\s+%s' % __name__, body)
- assert re.search('Function:\s+test_mail_handler_batching', body)
+ assert re.search(r'Message type:\s+ERROR', body)
+ assert re.search(r'Module:\s+%s' % __name__, body)
+ assert re.search(r'Function:\s+test_mail_handler_batching', body)
related = rest.strip().split('\n\n')
assert len(related) == 2
- assert re.search('Message type:\s+WARNING', related[0])
- assert re.search('Message type:\s+DEBUG', related[1])
+ assert re.search(r'Message type:\s+WARNING', related[0])
+ assert re.search(r'Message type:\s+DEBUG', related[1])
assert 'And this triggers it again' in mail_handler.mails[1][2]
@@ -101,14 +101,14 @@ def test_group_handler_mail_combo(activation_strategy, logger):
body, rest = pieces
rest = rest.replace('\r', '')
- assert re.search('Message type:\\s+ERROR', body)
- assert re.search('Module:\s+' + __name__, body)
- assert re.search('Function:\s+test_group_handler_mail_combo', body)
+ assert re.search(r'Message type:\s+ERROR', body)
+ assert re.search(r'Module:\s+' + __name__, body)
+ assert re.search(r'Function:\s+test_group_handler_mail_combo', body)
related = rest.strip().split('\n\n')
assert len(related) == 2
- assert re.search('Message type:\s+WARNING', related[0])
- assert re.search('Message type:\s+DEBUG', related[1])
+ assert re.search(r'Message type:\s+WARNING', related[0])
+ assert re.search(r'Message type:\s+DEBUG', related[1])
def test_mail_handler_arguments():
diff --git a/tests/test_syslog_handler.py b/tests/test_syslog_handler.py
index 9772a2a..d19d3f2 100644
--- a/tests/test_syslog_handler.py
+++ b/tests/test_syslog_handler.py
@@ -1,4 +1,5 @@
import os
+import re
import socket
from contextlib import closing
@@ -7,33 +8,59 @@ from logbook.helpers import u
import pytest
+unix_socket = "/tmp/__unixsock_logbook.test"
-def test_syslog_handler(logger, activation_strategy, unix_sock_path):
- to_test = [
- (socket.AF_INET, ('127.0.0.1', 0)),
- ]
- if hasattr(socket, 'AF_UNIX'):
- to_test.append((socket.AF_UNIX, unix_sock_path))
- for sock_family, address in to_test:
- with closing(socket.socket(sock_family, socket.SOCK_DGRAM)) as inc:
- inc.bind(address)
- inc.settimeout(1)
- for app_name in [None, 'Testing']:
- handler = logbook.SyslogHandler(app_name, inc.getsockname())
- with activation_strategy(handler):
- logger.warn('Syslog is weird')
- try:
+to_test = [
+ (socket.AF_INET, socket.SOCK_DGRAM, ('127.0.0.1', 0)),
+ (socket.AF_INET, socket.SOCK_STREAM, ('127.0.0.1', 0)),
+]
+if hasattr(socket, 'AF_UNIX'):
+ to_test.append((socket.AF_UNIX, socket.SOCK_DGRAM, unix_socket))
+
[email protected]("unix_sock_path")
[email protected]("sock_family,socktype,address", to_test)
+def test_syslog_handler(logger, activation_strategy,
+ sock_family, socktype, address):
+ delimiter = {socket.AF_UNIX: '\x00',
+ socket.AF_INET: '\n'}[sock_family]
+ with closing(socket.socket(sock_family, socktype)) as inc:
+ inc.bind(address)
+ if socktype == socket.SOCK_STREAM:
+ inc.listen(0)
+ inc.settimeout(1)
+ for app_name in [None, 'Testing']:
+ if sock_family == socket.AF_UNIX:
+ expected = (r'^<12>%stestlogger: Syslog is weird%s$' %
+ (app_name + ':' if app_name else '',
+ delimiter))
+ else:
+ expected = (r'^<12>1 \d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z %s %s %d '
+ '- - %sSyslog is weird%s$' %
+ (socket.gethostname(),
+ app_name if app_name else 'testlogger',
+ os.getpid(), 'testlogger: ' if app_name else '',
+ delimiter))
+
+ handler = logbook.SyslogHandler(app_name, inc.getsockname(),
+ socktype=socktype)
+ with activation_strategy(handler):
+ logger.warn('Syslog is weird')
+ try:
+ if socktype == socket.SOCK_STREAM:
+ with closing(inc.accept()[0]) as inc2:
+ rv = inc2.recv(1024)
+ else:
rv = inc.recvfrom(1024)[0]
- except socket.error:
- assert False, 'got timeout on socket'
- assert rv == (
- u('<12>%stestlogger: Syslog is weird\x00') %
- ((app_name and (app_name + u(':'))) or u(''))).encode('utf-8')
+ except socket.error:
+ assert False, 'got timeout on socket'
+ rv = rv.decode('utf-8')
+ assert re.match(expected, rv), \
+ 'expected {}, got {}'.format(expected, rv)
@pytest.fixture
def unix_sock_path(request):
- returned = "/tmp/__unixsock_logbook.test"
+ returned = unix_socket
@request.addfinalizer
def cleanup():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y libzmq3-dev",
"pip install cython"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-timeout==4.0.2
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Brotli==1.1.0
certifi==2021.5.30
coverage==6.2
Cython==3.0.12
execnet==1.9.0
greenlet==2.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
-e git+https://github.com/getlogbook/logbook.git@7d97a72a848c176108845eda44503a688e0e5042#egg=Logbook
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pyzmq==25.1.2
redis==4.3.6
SQLAlchemy==1.4.54
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: logbook
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- async-timeout==4.0.2
- brotli==1.1.0
- coverage==6.2
- cython==3.0.12
- execnet==1.9.0
- greenlet==2.0.2
- jinja2==3.0.3
- markupsafe==2.0.1
- pytest-cov==4.0.0
- pyzmq==25.1.2
- redis==4.3.6
- sqlalchemy==1.4.54
- tomli==1.2.3
prefix: /opt/conda/envs/logbook
| [
"tests/test_syslog_handler.py::test_syslog_handler[ContextEnteringStrategy-AddressFamily.AF_INET-SocketKind.SOCK_DGRAM-address0]",
"tests/test_syslog_handler.py::test_syslog_handler[ContextEnteringStrategy-AddressFamily.AF_INET-SocketKind.SOCK_STREAM-address1]",
"tests/test_syslog_handler.py::test_syslog_handler[PushingStrategy-AddressFamily.AF_INET-SocketKind.SOCK_DGRAM-address0]",
"tests/test_syslog_handler.py::test_syslog_handler[PushingStrategy-AddressFamily.AF_INET-SocketKind.SOCK_STREAM-address1]"
]
| []
| [
"tests/test_logging_compat.py::test_basic_compat[True]",
"tests/test_logging_compat.py::test_basic_compat[False]",
"tests/test_logging_compat.py::test_redirect_logbook",
"tests/test_logging_compat.py::test_warning_redirections",
"tests/test_mail_handler.py::test_mail_handler[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_mail_handler[PushingStrategy]",
"tests/test_mail_handler.py::test_mail_handler_batching[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_mail_handler_batching[PushingStrategy]",
"tests/test_mail_handler.py::test_group_handler_mail_combo[ContextEnteringStrategy]",
"tests/test_mail_handler.py::test_group_handler_mail_combo[PushingStrategy]",
"tests/test_mail_handler.py::test_mail_handler_arguments",
"tests/test_syslog_handler.py::test_syslog_handler[ContextEnteringStrategy-AddressFamily.AF_UNIX-SocketKind.SOCK_DGRAM-/tmp/__unixsock_logbook.test]",
"tests/test_syslog_handler.py::test_syslog_handler[PushingStrategy-AddressFamily.AF_UNIX-SocketKind.SOCK_DGRAM-/tmp/__unixsock_logbook.test]"
]
| []
| BSD License | 1,476 | [
"logbook/handlers.py",
"CHANGES"
]
| [
"logbook/handlers.py",
"CHANGES"
]
|
asottile__add-trailing-comma-23 | 47aa870cde65d699237d345df17bfb1ca03bd3f7 | 2017-07-17 17:47:46 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 784e00b..da4d733 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -59,7 +59,8 @@ def _is_star_star_kwarg(node):
class FindNodes(ast.NodeVisitor):
def __init__(self):
- self.calls = {}
+ # multiple calls can report their starting position as the same
+ self.calls = collections.defaultdict(list)
self.funcs = {}
self.literals = {}
@@ -109,7 +110,7 @@ class FindNodes(ast.NodeVisitor):
if arg_offsets and not only_a_generator:
key = Offset(node.lineno, node.col_offset)
- self.calls[key] = Call(node, has_starargs, arg_offsets)
+ self.calls[key].append(Call(node, has_starargs, arg_offsets))
self.generic_visit(node)
@@ -312,33 +313,30 @@ def _fix_src(contents_text, py35_plus):
tokens = src_to_tokens(contents_text)
for i, token in _changing_list(tokens):
key = Offset(token.line, token.utf8_byte_offset)
- add_comma = True
- fix_data = None
+ fixes = []
if key in visitor.calls:
- call = visitor.calls[key]
- # Only fix stararg calls if asked to
- add_comma = not call.star_args or py35_plus
- fix_data = _find_call(call, i, tokens)
+ for call in visitor.calls[key]:
+ # Only fix stararg calls if asked to
+ add_comma = not call.star_args or py35_plus
+ fixes.append((add_comma, _find_call(call, i, tokens)))
elif key in visitor.funcs:
- func = visitor.funcs[key]
# functions can be treated as calls
- fix_data = _find_call(func, i, tokens)
+ fixes.append((True, _find_call(visitor.funcs[key], i, tokens)))
# Handle parenthesized things
elif token.src == '(':
- fix_data = _find_simple(i, tokens)
- add_comma = False
-
- if fix_data is not None:
- _fix_brace(fix_data, add_comma, tokens)
+ fixes.append((False, _find_simple(i, tokens)))
# need to additionally handle literals afterwards as tuples report
# their starting index as the first element, which may be one of the
# above things.
if key in visitor.literals:
fix_data = _find_literal(visitor.literals[key], i, tokens)
+ fixes.append((True, fix_data))
+
+ for add_comma, fix_data in fixes:
if fix_data is not None:
- _fix_brace(fix_data, True, tokens)
+ _fix_brace(fix_data, add_comma, tokens)
return tokens_to_src(tokens)
| Regression f({}).y(...) is not adding commas
Seems I broke this in 0.4.3 because now the first function is being considered:
```python
f({}).y(
x
)
```
should add a comma on the `x` line. | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 12b7326..5229737 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -114,6 +114,16 @@ def test_py35_plus_rewrite():
' 1,\n'
')',
),
+ # Regression test for #22
+ (
+ 'x({}).y(\n'
+ ' x\n'
+ ')',
+
+ 'x({}).y(\n'
+ ' x,\n'
+ ')',
+ ),
),
)
def test_fixes_calls(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@47aa870cde65d699237d345df17bfb1ca03bd3f7#egg=add_trailing_comma
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tokenize_rt==6.1.0
tomli==2.2.1
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tokenize-rt==6.1.0
- tomli==2.2.1
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n"
]
| [
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x"
]
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,477 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
asottile__add-trailing-comma-25 | c7da498ebb0549a0925b8f4c3502d8fd27f554b8 | 2017-07-17 18:06:47 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index da4d733..70cb166 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -63,11 +63,12 @@ class FindNodes(ast.NodeVisitor):
self.calls = collections.defaultdict(list)
self.funcs = {}
self.literals = {}
+ self.tuples = {}
- def _visit_literal(self, node, key='elts', **kwargs):
+ def _visit_literal(self, node, key='elts'):
if getattr(node, key):
key = Offset(node.lineno, node.col_offset)
- self.literals[key] = Literal(node, **kwargs)
+ self.literals[key] = Literal(node)
self.generic_visit(node)
visit_Set = visit_List = _visit_literal
@@ -76,8 +77,11 @@ class FindNodes(ast.NodeVisitor):
self._visit_literal(node, key='values')
def visit_Tuple(self, node):
- # tuples lie about things so we tell the later machiner to backtrack
- self._visit_literal(node, backtrack=True)
+ if node.elts:
+ key = Offset(node.lineno, node.col_offset)
+ # tuples lie about offset -- tell the later machinery to backtrack
+ self.tuples[key] = Literal(node, backtrack=True)
+ self.generic_visit(node)
def visit_Call(self, node):
argnodes = node.args + node.keywords
@@ -200,16 +204,15 @@ def _find_call(call, i, tokens):
return _find_simple(first_brace, tokens)
-def _find_literal(literal, i, tokens):
+def _find_tuple(i, tokens):
# tuples are evil, we need to backtrack to find the opening paren
- if literal.backtrack:
+ i -= 1
+ while tokens[i].name in NON_CODING_TOKENS:
i -= 1
- while tokens[i].name in NON_CODING_TOKENS:
- i -= 1
- # Sometimes tuples don't even have a paren!
- # x = 1, 2, 3
- if tokens[i].src != '(':
- return
+ # Sometimes tuples don't even have a paren!
+ # x = 1, 2, 3
+ if tokens[i].src != '(':
+ return
return _find_simple(i, tokens)
@@ -326,13 +329,14 @@ def _fix_src(contents_text, py35_plus):
# Handle parenthesized things
elif token.src == '(':
fixes.append((False, _find_simple(i, tokens)))
+ elif key in visitor.literals:
+ fixes.append((True, _find_simple(i, tokens)))
# need to additionally handle literals afterwards as tuples report
# their starting index as the first element, which may be one of the
# above things.
- if key in visitor.literals:
- fix_data = _find_literal(visitor.literals[key], i, tokens)
- fixes.append((True, fix_data))
+ if key in visitor.tuples:
+ fixes.append((True, _find_tuple(i, tokens)))
for add_comma, fix_data in fixes:
if fix_data is not None:
| Regression ({}, ()) is not adding a trailing comma
Similar to #22
```python
(
{k: v},
()
)
```
It is not adding a trailing comma after the empty tuple `()`. | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 5229737..a7e4abe 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -124,6 +124,18 @@ def test_py35_plus_rewrite():
' x,\n'
')',
),
+ # Regression test for #23
+ (
+ '(\n'
+ ' {k: v},\n'
+ ' ()\n'
+ ')',
+
+ '(\n'
+ ' {k: v},\n'
+ ' (),\n'
+ ')',
+ ),
),
)
def test_fixes_calls(src, expected):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@c7da498ebb0549a0925b8f4c3502d8fd27f554b8#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_fixes_calls[(\\n"
]
| []
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_one_line_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs"
]
| []
| MIT License | 1,478 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
collective__icalendar-235 | 34fda85e994738da788d6ae826c5f97375c2ac72 | 2017-07-18 10:01:25 | 34fda85e994738da788d6ae826c5f97375c2ac72 | diff --git a/CHANGES.rst b/CHANGES.rst
index 5119c87..dc581d9 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -10,7 +10,8 @@ Breaking changes:
New features:
-- *add item here*
+- added vUTCOffset.ignore_exceptions to allow surpressing of failed TZOFFSET
+ parsing (for now this ignores the check for offsets > 24h) [geier]
Bug fixes:
diff --git a/src/icalendar/prop.py b/src/icalendar/prop.py
index f084e84..5c705c2 100644
--- a/src/icalendar/prop.py
+++ b/src/icalendar/prop.py
@@ -792,6 +792,11 @@ class vUTCOffset(object):
"""Renders itself as a utc offset.
"""
+ ignore_exceptions = False # if True, and we cannot parse this
+ # component, we will silently ignore
+ # it, rather than let the exception
+ # propagate upwards
+
def __init__(self, td):
if not isinstance(td, timedelta):
raise ValueError('Offset value MUST be a timedelta instance')
@@ -831,7 +836,7 @@ class vUTCOffset(object):
offset = timedelta(hours=hours, minutes=minutes, seconds=seconds)
except:
raise ValueError('Expected utc offset, got: %s' % ical)
- if offset >= timedelta(hours=24):
+ if not cls.ignore_exceptions and offset >= timedelta(hours=24):
raise ValueError(
'Offset must be less than 24 hours, was %s' % ical)
if sign == '-':
| Offsets larger than 24h?
At https://github.com/geier/khal/issues/140 there is a user whose offset values are not accepted by icalendar. I have no idea what is going on in that iCalendar file. Anybody got any ideas?
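For reference, the patch above resolves this with an opt-in escape hatch; the sketch below mirrors the new test fixture (the only assumption is that this patched icalendar is importable), showing how an out-of-range offset such as `TZOFFSETFROM:+5744` can be tolerated instead of raising `ValueError`.
```python
import icalendar

# Historical timezone definitions can carry offsets of 24 hours or more,
# e.g. TZOFFSETFROM:+5744 (57h44m), which the strict parser rejects.
cal_str = b"\r\n".join([
    b"BEGIN:VCALENDAR",
    b"BEGIN:VTIMEZONE",
    b"TZID:Europe/Prague",
    b"BEGIN:STANDARD",
    b"DTSTART:18500101T000000",
    b"TZNAME:PMT",
    b"TZOFFSETFROM:+5744",
    b"TZOFFSETTO:+5744",
    b"END:STANDARD",
    b"END:VTIMEZONE",
    b"END:VCALENDAR",
    b"",
])

# Opt in to tolerating the out-of-range offset, then restore strict parsing.
icalendar.vUTCOffset.ignore_exceptions = True
try:
    cal = icalendar.Calendar.from_ical(cal_str)
finally:
    icalendar.vUTCOffset.ignore_exceptions = False
```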
| collective/icalendar | diff --git a/src/icalendar/tests/test_unit_cal.py b/src/icalendar/tests/test_unit_cal.py
index 0cda117..1082ce7 100644
--- a/src/icalendar/tests/test_unit_cal.py
+++ b/src/icalendar/tests/test_unit_cal.py
@@ -430,3 +430,26 @@ class TestCal(unittest.TestCase):
for e in icalendar.cal.Calendar.from_ical(s).walk('VEVENT')],
[[], [('EXDATE', "Expected datetime, date, or time, got: ''")]]
)
+
+ def test_cal_strict_parsing(self):
+ cal_str = b'\r\n'.join(
+ [
+ b'BEGIN:VCALENDAR',
+ b'BEGIN:VTIMEZONE',
+ b'TZID:Europe/Prague',
+ b'BEGIN:STANDARD',
+ b'DTSTART:18500101T000000',
+ b'TZNAME:PMT',
+ b'TZOFFSETFROM:+5744',
+ b'TZOFFSETTO:+5744',
+ b'END:STANDARD',
+ b'END:VTIMEZONE',
+ b'END:VCALENDAR',
+ b'',
+ ]
+ )
+
+ self.assertRaises(ValueError, icalendar.Calendar.from_ical, cal_str)
+ icalendar.vUTCOffset.ignore_exceptions = True
+ self.assertEqual(icalendar.Calendar.from_ical(cal_str).to_ical(), cal_str)
+ icalendar.vUTCOffset.ignore_exceptions = False
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 2,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 2
} | 3.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements_docs.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/collective/icalendar.git@34fda85e994738da788d6ae826c5f97375c2ac72#egg=icalendar
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
swebench_matterhorn @ file:///swebench_matterhorn
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
zipp==3.21.0
| name: icalendar
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- swebench-matterhorn==0.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/icalendar
| [
"src/icalendar/tests/test_unit_cal.py::TestCal::test_cal_strict_parsing"
]
| []
| [
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_add",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_add_no_reencode",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_add_property_parameter",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_from_ical",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_to_ical_parameter_order",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_cal_Component_to_ical_property_order",
"src/icalendar/tests/test_unit_cal.py::TestCalComponent::test_repr",
"src/icalendar/tests/test_unit_cal.py::TestCal::test_cal_Calendar",
"src/icalendar/tests/test_unit_cal.py::TestCal::test_cal_ComponentFactory"
]
| []
| BSD License | 1,480 | [
"src/icalendar/prop.py",
"CHANGES.rst"
]
| [
"src/icalendar/prop.py",
"CHANGES.rst"
]
|
|
jupyter__nbgrader-845 | e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a | 2017-07-18 16:08:12 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/__init__.py b/nbgrader/__init__.py
index 7e68cab9..e7ecd231 100644
--- a/nbgrader/__init__.py
+++ b/nbgrader/__init__.py
@@ -3,17 +3,12 @@ A system for assigning and grading notebooks.
"""
import os
+import sys
from ._version import version_info, __version__
def _jupyter_nbextension_paths():
- return [
- dict(
- section="tree",
- src=os.path.join('nbextensions', 'assignment_list'),
- dest="assignment_list",
- require="assignment_list/main"
- ),
+ paths = [
dict(
section="notebook",
src=os.path.join('nbextensions', 'create_assignment'),
@@ -34,9 +29,26 @@ def _jupyter_nbextension_paths():
),
]
+ if sys.platform != 'win32':
+ paths.append(
+ dict(
+ section="tree",
+ src=os.path.join('nbextensions', 'assignment_list'),
+ dest="assignment_list",
+ require="assignment_list/main"
+ )
+ )
+
+ return paths
+
+
def _jupyter_server_extension_paths():
- return [
- dict(module="nbgrader.server_extensions.assignment_list"),
+ paths = [
dict(module="nbgrader.server_extensions.formgrader"),
dict(module="nbgrader.server_extensions.validate_assignment")
]
+
+ if sys.platform != 'win32':
+ paths.append(dict(module="nbgrader.server_extensions.assignment_list"))
+
+ return paths
| Disable Assignment List extension on Windows
See https://github.com/conda-forge/nbgrader-feedstock/issues/12
@lgpage | jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_extension.py b/nbgrader/tests/apps/test_nbgrader_extension.py
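A quick sketch of how the platform gate introduced above could be checked (it assumes the patched nbgrader is importable; the private `_jupyter_*_paths` helpers are the ones modified in the diff):
```python
import sys
from nbgrader import (
    _jupyter_nbextension_paths,
    _jupyter_server_extension_paths,
)

# On win32 the assignment_list nbextension and server extension should be
# absent; on other platforms both should still be registered.
dests = [ext.get("dest") for ext in _jupyter_nbextension_paths()]
modules = [ext["module"] for ext in _jupyter_server_extension_paths()]

expect_assignment_list = sys.platform != "win32"
assert ("assignment_list" in dests) == expect_assignment_list
assert (
    "nbgrader.server_extensions.assignment_list" in modules
) == expect_assignment_list
```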
index f0c75f3d..c636413a 100644
--- a/nbgrader/tests/apps/test_nbgrader_extension.py
+++ b/nbgrader/tests/apps/test_nbgrader_extension.py
@@ -1,25 +1,82 @@
import os
-
import nbgrader
+import sys
+import contextlib
+
+
[email protected]
+def mock_platform(platform):
+ old_platform = sys.platform
+ sys.platform = platform
+ yield
+ sys.platform = old_platform
+
+
+def test_nbextension_linux():
+ from nbgrader import _jupyter_nbextension_paths
+ with mock_platform("linux"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 4
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ assert nbexts[3]['section'] == 'tree'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
+
+
+def test_nbextension_mac():
+ from nbgrader import _jupyter_nbextension_paths
+ with mock_platform("darwin"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 4
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ assert nbexts[3]['section'] == 'tree'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
-def test_nbextension():
+def test_nbextension_windows():
from nbgrader import _jupyter_nbextension_paths
- nbexts = _jupyter_nbextension_paths()
- assert len(nbexts) == 4
- assert nbexts[0]['section'] == 'tree'
- assert nbexts[1]['section'] == 'notebook'
- assert nbexts[2]['section'] == 'tree'
- assert nbexts[3]['section'] == 'notebook'
- paths = [ext['src'] for ext in nbexts]
- for path in paths:
- assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
-
-
-def test_serverextension():
+ with mock_platform("win32"):
+ nbexts = _jupyter_nbextension_paths()
+ assert len(nbexts) == 3
+ assert nbexts[0]['section'] == 'notebook'
+ assert nbexts[1]['section'] == 'tree'
+ assert nbexts[2]['section'] == 'notebook'
+ paths = [ext['src'] for ext in nbexts]
+ for path in paths:
+ assert os.path.isdir(os.path.join(os.path.dirname(nbgrader.__file__), path))
+
+
+def test_serverextension_linux():
+ from nbgrader import _jupyter_server_extension_paths
+ with mock_platform("linux"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 3
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ assert serverexts[2]['module'] == 'nbgrader.server_extensions.assignment_list'
+
+
+def test_serverextension_mac():
+ from nbgrader import _jupyter_server_extension_paths
+ with mock_platform("darwin"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 3
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ assert serverexts[2]['module'] == 'nbgrader.server_extensions.assignment_list'
+
+
+def test_serverextension_windows():
from nbgrader import _jupyter_server_extension_paths
- serverexts = _jupyter_server_extension_paths()
- assert len(serverexts) == 3
- assert serverexts[0]['module'] == 'nbgrader.server_extensions.assignment_list'
- assert serverexts[1]['module'] == 'nbgrader.server_extensions.formgrader'
- assert serverexts[2]['module'] == 'nbgrader.server_extensions.validate_assignment'
+ with mock_platform("win32"):
+ serverexts = _jupyter_server_extension_paths()
+ assert len(serverexts) == 2
+ assert serverexts[0]['module'] == 'nbgrader.server_extensions.formgrader'
+ assert serverexts[1]['module'] == 'nbgrader.server_extensions.validate_assignment'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_linux",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_mac",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_nbextension_windows",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_linux",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_mac",
"nbgrader/tests/apps/test_nbgrader_extension.py::test_serverextension_windows"
]
| []
| []
| []
| BSD 3-Clause "New" or "Revised" License | 1,481 | [
"nbgrader/__init__.py"
]
| [
"nbgrader/__init__.py"
]
|
|
jupyter__nbgrader-846 | e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a | 2017-07-18 16:20:20 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/docs/source/contributor_guide/metadata.rst b/nbgrader/docs/source/contributor_guide/metadata.rst
index d74f054d..4ede34a1 100644
--- a/nbgrader/docs/source/contributor_guide/metadata.rst
+++ b/nbgrader/docs/source/contributor_guide/metadata.rst
@@ -73,7 +73,8 @@ The metadata may contain the following keys:
Added by the "Create Assignment" extension.
This is the number of points that a cell is worth. It should only be
- set if ``grade`` is also set to true.
+ set if ``grade`` is also set to true. The number of points must be greater
+ than or equal to zero.
.. data:: checksum
diff --git a/nbgrader/nbextensions/create_assignment/main.js b/nbgrader/nbextensions/create_assignment/main.js
index 696908a4..ff4f8b3d 100644
--- a/nbgrader/nbextensions/create_assignment/main.js
+++ b/nbgrader/nbextensions/create_assignment/main.js
@@ -224,7 +224,9 @@ define([
if (cell.metadata.nbgrader === undefined) {
cell.metadata.nbgrader = {};
}
- cell.metadata.nbgrader.points = to_float(val);
+ var points = to_float(val);
+ if (points < 0) points = 0;
+ cell.metadata.nbgrader.points = points;
};
var get_grade_id = function (cell) {
diff --git a/nbgrader/nbgraderformat/v1.py b/nbgrader/nbgraderformat/v1.py
index 85afab34..5358d8d6 100644
--- a/nbgrader/nbgraderformat/v1.py
+++ b/nbgrader/nbgraderformat/v1.py
@@ -32,7 +32,11 @@ class ValidatorV1(BaseValidator):
if meta['points'] == '':
meta['points'] = 0.0
else:
- meta['points'] = float(meta['points'])
+ points = float(meta['points'])
+ if points < 0:
+ meta['points'] = 0.0
+ else:
+ meta['points'] = float(meta['points'])
else:
meta['points'] = 0.0
| UI allows negative points
The UI should probably only allow points in the range [0, ∞) for graded cells. It currently allows negative points.

| jupyter/nbgrader | diff --git a/nbgrader/tests/nbextensions/test_create_assignment.py b/nbgrader/tests/nbextensions/test_create_assignment.py
index da9bbc34..e972b4da 100644
--- a/nbgrader/tests/nbextensions/test_create_assignment.py
+++ b/nbgrader/tests/nbextensions/test_create_assignment.py
@@ -571,3 +571,24 @@ def test_cell_ids(browser, port):
_save(browser)
_wait_for_modal(browser)
_dismiss_modal(browser)
+
+
[email protected]
+def test_negative_points(browser, port):
+ _load_notebook(browser, port)
+ _activate_toolbar(browser)
+
+ # make sure the total points is zero
+ assert _get_total_points(browser) == 0
+
+ # make it autograder tests and set the points to two
+ _select_tests(browser)
+ _set_points(browser, points=2)
+ _set_id(browser)
+ assert _get_total_points(browser) == 2
+ assert 2 == _get_metadata(browser)['points']
+
+ # set the points to negative one
+ _set_points(browser, points=-1)
+ assert _get_total_points(browser) == 0
+ assert 0 == _get_metadata(browser)['points']
diff --git a/nbgrader/tests/nbgraderformat/test_v1.py b/nbgrader/tests/nbgraderformat/test_v1.py
index 1245aa5e..b85e4963 100644
--- a/nbgrader/tests/nbgraderformat/test_v1.py
+++ b/nbgrader/tests/nbgraderformat/test_v1.py
@@ -52,6 +52,10 @@ def test_set_points():
ValidatorV1().upgrade_cell_metadata(cell)
assert cell.metadata.nbgrader["points"] == 0.0
+ cell = create_grade_cell("", "code", "foo", -1, 0)
+ ValidatorV1().upgrade_cell_metadata(cell)
+ assert cell.metadata.nbgrader["points"] == 0.0
+
def test_extra_keys():
cell = create_grade_cell("", "code", "foo", "", 0)
@@ -65,4 +69,3 @@ def test_schema_version():
del cell.metadata.nbgrader["schema_version"]
ValidatorV1().upgrade_cell_metadata(cell)
assert cell.metadata.nbgrader["schema_version"] == 1
-
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_media",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@e2f288ce4a11d08db211e67b7c0d2f9ff0c5656a#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/nbgraderformat/test_v1.py::test_set_points"
]
| []
| [
"nbgrader/tests/nbgraderformat/test_v1.py::test_set_false",
"nbgrader/tests/nbgraderformat/test_v1.py::test_remove_metadata",
"nbgrader/tests/nbgraderformat/test_v1.py::test_remove_points",
"nbgrader/tests/nbgraderformat/test_v1.py::test_extra_keys",
"nbgrader/tests/nbgraderformat/test_v1.py::test_schema_version"
]
| []
| BSD 3-Clause "New" or "Revised" License | 1,482 | [
"nbgrader/docs/source/contributor_guide/metadata.rst",
"nbgrader/nbextensions/create_assignment/main.js",
"nbgrader/nbgraderformat/v1.py"
]
| [
"nbgrader/docs/source/contributor_guide/metadata.rst",
"nbgrader/nbextensions/create_assignment/main.js",
"nbgrader/nbgraderformat/v1.py"
]
|
|
asottile__add-trailing-comma-28 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | 2017-07-18 18:45:04 | 3343fe9ba1b396342d27a73fafa88807b47fc254 | diff --git a/add_trailing_comma.py b/add_trailing_comma.py
index 926f9de..2e71af5 100644
--- a/add_trailing_comma.py
+++ b/add_trailing_comma.py
@@ -325,6 +325,9 @@ def _fix_src(contents_text, py35_plus, py36_plus):
tokens = src_to_tokens(contents_text)
for i, token in _changing_list(tokens):
+ # DEDENT is a zero length token
+ if not token.src:
+ continue
key = Offset(token.line, token.utf8_byte_offset)
fixes = []
| AssertionError on valid syntax
```python
with a:
pass
[b] = {1}
```
produces this error with add-trailing-comma version 0.5.1:
```
Traceback (most recent call last):
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/bin/add-trailing-comma", line 9, in <module>
load_entry_point('add-trailing-comma==0.5.1', 'console_scripts', 'add-trailing-comma')()
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 378, in main
ret |= fix_file(filename, args)
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 358, in fix_file
contents_text = _fix_src(contents_text, args.py35_plus)
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 333, in _fix_src
fixes.append((True, _find_simple(i, tokens)))
File "/nail/home/ckuehl/.pre-commit/repo3dPrz7/py_env-python2.7/local/lib/python2.7/site-packages/add_trailing_comma.py", line 154, in _find_simple
raise AssertionError('Past end?')
AssertionError: Past end
``` | asottile/add-trailing-comma | diff --git a/tests/add_trailing_comma_test.py b/tests/add_trailing_comma_test.py
index 82e51eb..a370743 100644
--- a/tests/add_trailing_comma_test.py
+++ b/tests/add_trailing_comma_test.py
@@ -149,9 +149,13 @@ def test_fixes_calls(src, expected):
'[1, 2, 3, 4]',
'{1, 2, 3, 4}',
'{1: 2, 3: 4}',
+ # Regression test for #26
+ 'if True:\n'
+ ' pass\n'
+ '[x] = {y}',
),
)
-def test_noop_one_line_literals(src):
+def test_noop_literals(src):
assert _fix_src(src, py35_plus=False, py36_plus=False) == src
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/add-trailing-comma.git@3343fe9ba1b396342d27a73fafa88807b47fc254#egg=add_trailing_comma
attrs==22.2.0
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.2.0
importlib-resources==5.2.3
iniconfig==1.1.1
mccabe==0.7.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
tokenize-rt==4.2.1
toml==0.10.2
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.16.2
zipp==3.6.0
| name: add-trailing-comma
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.2.0
- importlib-resources==5.2.3
- iniconfig==1.1.1
- mccabe==0.7.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- tokenize-rt==4.2.1
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/add-trailing-comma
| [
"tests/add_trailing_comma_test.py::test_noop_literals[if"
]
| []
| [
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(1)]",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[tuple(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x(\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[x((\\n",
"tests/add_trailing_comma_test.py::test_fix_calls_noops[(\\n",
"tests/add_trailing_comma_test.py::test_ignores_invalid_ast_node",
"tests/add_trailing_comma_test.py::test_py35_plus_rewrite",
"tests/add_trailing_comma_test.py::test_fixes_calls[x(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[foo()(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[x({}).y(\\n",
"tests/add_trailing_comma_test.py::test_fixes_calls[(\\n",
"tests/add_trailing_comma_test.py::test_noop_literals[(1,",
"tests/add_trailing_comma_test.py::test_noop_literals[[1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1,",
"tests/add_trailing_comma_test.py::test_noop_literals[{1:",
"tests/add_trailing_comma_test.py::test_fixes_literals[x",
"tests/add_trailing_comma_test.py::test_fixes_py35_plus_literals[x",
"tests/add_trailing_comma_test.py::test_noop_tuple_literal_without_braces",
"tests/add_trailing_comma_test.py::test_noop_function_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs[def",
"tests/add_trailing_comma_test.py::test_fixes_defs_py36_plus[def",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(x,",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f((\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[f([\\n",
"tests/add_trailing_comma_test.py::test_noop_unhugs[textwrap.dedent(\"\"\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(a,\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[def",
"tests/add_trailing_comma_test.py::test_fix_unhugs[with",
"tests/add_trailing_comma_test.py::test_fix_unhugs[if",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{'foo':",
"tests/add_trailing_comma_test.py::test_fix_unhugs[f(g(\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs[{\"foo\":",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\",",
"tests/add_trailing_comma_test.py::test_fix_unhugs[x(\"foo\"\\n",
"tests/add_trailing_comma_test.py::test_fix_unhugs_py3_only[def",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[[]]",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[y",
"tests/add_trailing_comma_test.py::test_noop_trailing_brace[foo.\\\\\\n",
"tests/add_trailing_comma_test.py::test_fix_trailing_brace[x",
"tests/add_trailing_comma_test.py::test_main_trivial",
"tests/add_trailing_comma_test.py::test_main_noop",
"tests/add_trailing_comma_test.py::test_main_changes_a_file",
"tests/add_trailing_comma_test.py::test_main_syntax_error",
"tests/add_trailing_comma_test.py::test_main_non_utf8_bytes",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_args",
"tests/add_trailing_comma_test.py::test_main_py35_plus_argument_star_star_kwargs",
"tests/add_trailing_comma_test.py::test_main_py36_plus_implies_py35_plus",
"tests/add_trailing_comma_test.py::test_main_py36_plus_function_trailing_commas"
]
| []
| MIT License | 1,483 | [
"add_trailing_comma.py"
]
| [
"add_trailing_comma.py"
]
|
|
vertexproject__synapse-349 | 129e058c323f9ff0e1d130c0fa47bdfd423f6515 | 2017-07-18 20:33:45 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/models/files.py b/synapse/models/files.py
index 63916defd..1dcf82c57 100644
--- a/synapse/models/files.py
+++ b/synapse/models/files.py
@@ -87,6 +87,8 @@ class FileMod(CoreModule):
Hashes that we consider "cardinal enough" to pivot.
'''
name = prop.rsplit(':', 1)[-1]
+ # Normalize the valu before we go any further
+ valu, _ = self.core.getPropNorm(prop, valu)
props[name] = valu
# FIXME could we update additional hashes here and
@@ -107,10 +109,12 @@ class FileMod(CoreModule):
return tufo
def seedFileMd5(self, prop, valu, **props):
+ valu, _ = self.core.getPropNorm('file:bytes:md5', valu)
props['md5'] = valu
return self.core.formTufoByProp('file:bytes', valu, **props)
def seedFileSha1(self, prop, valu, **props):
+ valu, _ = self.core.getPropNorm('file:bytes:sha1', valu)
props['sha1'] = valu
valu = guid(valu)
return self.core.formTufoByProp('file:bytes', valu, **props)
| Subsystem that parses ingest.json files calculates different file:bytes GUID (superhash) based on capitalization of hash value
Identified during testing for #334 .
Synapse calculates a GUID as a primary property for a file:bytes node based on the available hash value(s) for the bytes. If the bytes are present, all hashes are used; if we only have a hash and no bytes, the available hash is used.
The GUID should be calculated the same way regardless of whether the hash contains upper case or lower case alpha characters. This is true when adding nodes via Storm:
```
cli> ask [file:bytes:sha256=6ACC29BFC5F8F772FA7AAF4A705F91CB68DC88CB22F4EF5101281DC42109A104]
file:bytes = 8a249054e1c0e455657867a817aa9fcc
(1 results)
cli> ask [file:bytes:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104]
file:bytes = 8a249054e1c0e455657867a817aa9fcc
(1 results)
```
However, this is *not* the case when adding data via an ingest.json file.
1. Create two test ingest.json files, each adds only a single node based on file:bytes:sha256.
2. In one file (test1.json), create the SHA256 with upper case alpha characters.
3. In the other file (test2.json) create the SHA256 with lower case alpha characters.
Result:
A. Upper case hash:
```
py3 -m synapse.tools.ingest --verbose ~/research/test1.json --sync <cortex>
add: file:bytes=8a249054e1c0e455657867a817aa9fcc
:mime = ??
:sha256 = 6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
add: hash:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
ingest took: 0.0028884410858154297 sec
```
B. Lower case hash:
```
py3 -m synapse.tools.ingest --verbose ~/research/test2.json --sync <cortex>
add: file:bytes=ed73917b1dc4011627f7a101ace491c8
:mime = ??
:sha256 = 6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
add: hash:sha256=6acc29bfc5f8f772fa7aaf4a705f91cb68dc88cb22f4ef5101281dc42109a104
ingest took: 0.0036017894744873047 sec
```
Note that it appears that the upper-case hash calculates the "correct" GUID (GUID value matches the one generated via Storm), but the lower-case hash does not.
| vertexproject/synapse | diff --git a/synapse/tests/test_model_files.py b/synapse/tests/test_model_files.py
index 0b917710e..f42e5edb1 100644
--- a/synapse/tests/test_model_files.py
+++ b/synapse/tests/test_model_files.py
@@ -1,11 +1,11 @@
from __future__ import absolute_import, unicode_literals
import synapse.axon as s_axon
-import synapse.compat as s_compat
-import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.telepath as s_telepath
+import synapse.lib.tufo as s_tufo
+
from synapse.tests.common import *
class FileModelTest(SynTest):
@@ -44,6 +44,22 @@ class FileModelTest(SynTest):
self.ne(t0[0], core.formTufoByProp('file:bytes:sha1', props.get('sha1'))[0])
self.ne(t0[0], core.formTufoByProp('file:bytes:md5', props.get('md5'))[0])
+ def test_model_file_seeds_capitalization(self):
+ fhash = '6ACC29BFC5F8F772FA7AAF4A705F91CB68DC88CB22F4EF5101281DC42109A104'
+ fhash_lower = fhash.lower()
+ stable_guid = 'ed73917b1dc4011627f7a101ace491c8'
+
+ with s_cortex.openurl('ram:///') as core:
+
+ n1 = core.formTufoByProp('file:bytes:sha256', fhash)
+ n2 = core.formTufoByProp('file:bytes:sha256', fhash_lower)
+ # Sha256 should be lowercase since the prop type is lowercased
+ n1def = s_tufo.ndef(n1)
+ n2def = s_tufo.ndef(n2)
+ self.eq(n1def[1], stable_guid)
+ self.eq(n2def[1], stable_guid)
+ self.eq(n1[0], n2[0])
+
def test_filepath(self):
with s_cortex.openurl('ram:///') as core:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@129e058c323f9ff0e1d130c0fa47bdfd423f6515#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds_capitalization"
]
| []
| [
"synapse/tests/test_model_files.py::FileModelTest::test_filebase",
"synapse/tests/test_model_files.py::FileModelTest::test_filepath",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_bytes_axon",
"synapse/tests/test_model_files.py::FileModelTest::test_model_file_seeds",
"synapse/tests/test_model_files.py::FileModelTest::test_model_files_imgof",
"synapse/tests/test_model_files.py::FileModelTest::test_model_files_txtref"
]
| []
| Apache License 2.0 | 1,484 | [
"synapse/models/files.py"
]
| [
"synapse/models/files.py"
]
|
|
vertexproject__synapse-350 | f069d21504fad7c0acb6ac96893400b899f9589c | 2017-07-18 21:14:41 | 6f5fc661a88b8cc3f4befb2c9c7ddcebf0b89ba0 | diff --git a/synapse/lib/storm.py b/synapse/lib/storm.py
index f462aa012..a1dfde286 100644
--- a/synapse/lib/storm.py
+++ b/synapse/lib/storm.py
@@ -406,6 +406,7 @@ class Runtime(Configable):
self.setOperFunc('load', self._stormOperLoad)
self.setOperFunc('clear', self._stormOperClear)
+ self.setOperFunc('guid', self._stormOperGuid)
self.setOperFunc('join', self._stormOperJoin)
self.setOperFunc('lift', self._stormOperLift)
self.setOperFunc('refs', self._stormOperRefs)
@@ -1101,7 +1102,7 @@ class Runtime(Configable):
core = self.getStormCore()
props = {}
- for k,v in kwlist:
+ for k, v in kwlist:
if not k[0] == ':':
raise s_common.BadSyntaxError(mesg='addnode() expects relative props with : prefix')
@@ -1319,3 +1320,9 @@ class Runtime(Configable):
[query.add(n) for n in nodes]
return
+
+ def _stormOperGuid(self, query, oper):
+ args = oper[1].get('args')
+ core = self.getStormCore()
+
+ [query.add(node) for node in core.getTufosByIdens(args)]
| Ability to lift by tufo guid in storm
thesilence [12:29 PM]
There may be cases when working at the CLI where "lift by GUID" may be useful, mostly from a copypasta standpoint / repr limitations.
thesilence [12:30 PM]
I haven't run across this specifically, but (again, "theoretically") there may be node type(s) where properties are too lengthy / complex to type or copy, so the easiest thing to copypasta would be the GUID.
epiphyte
[12:30 PM]
yeah - and there is also the use-case @7.389 mentioned for sharing
epiphyte
[12:31 PM]
"Hey guys, checkout 'iden=12345 totags()' - its a real doozy" | vertexproject/synapse | diff --git a/synapse/tests/test_lib_storm.py b/synapse/tests/test_lib_storm.py
index 3f6b77989..07a5e269e 100644
--- a/synapse/tests/test_lib_storm.py
+++ b/synapse/tests/test_lib_storm.py
@@ -563,6 +563,29 @@ class StormTest(SynTest):
['1.2.3.4', 'vv', '#foo.bar'],
])
+ def test_storm_guid(self):
+
+ with s_cortex.openurl('ram:///') as core:
+
+ node0 = core.formTufoByProp('inet:ipv4', '1.2.3.4')
+ node1 = core.formTufoByProp('inet:ipv4', '4.5.6.7')
+
+ nodes = core.eval('guid()')
+ self.eq(len(nodes), 0)
+
+ nodes = core.eval('guid(%s)' % node0[0])
+ self.eq(nodes[0][1].get('inet:ipv4'), 0x01020304)
+
+ nodes = core.eval('guid(%s,%s)' % (node0[0], node1[0]))
+ vals = list(sorted(v[1].get('inet:ipv4') for v in nodes))
+ self.eq(vals, [0x01020304, 0x04050607])
+
+ # We can lift dark rows using guid() but kind of an easter egg.
+ core.addTufoDark(node0, 'foo:bar', 'duck:knight')
+ nodes = core.eval('guid(%s)' % node0[0][::-1])
+ self.eq(len(nodes), 1)
+ self.eq(node0[0], nodes[0][0][::-1])
+
class LimitTest(SynTest):
def test_limit_default(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 0,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"nose",
"coverage",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y build-essential libffi-dev libssl-dev python3 python3-dev python3-pip python3-setuptools"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cryptography==40.0.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lmdb==1.6.2
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack-python==0.5.6
nose==1.3.7
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
pyOpenSSL==23.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
-e git+https://github.com/vertexproject/synapse.git@f069d21504fad7c0acb6ac96893400b899f9589c#egg=synapse
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
xxhash==3.2.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: synapse
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.15.1
- coverage==6.2
- cryptography==40.0.2
- lmdb==1.6.2
- msgpack-python==0.5.6
- nose==1.3.7
- pycparser==2.21
- pyopenssl==23.2.0
- tornado==6.1
- xxhash==3.2.0
prefix: /opt/conda/envs/synapse
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_guid"
]
| []
| [
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_addtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_alltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_cmpr_norm",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_delnode_caching",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_deltag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_edit_end",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_editmode",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_filt_regex",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lift",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_lifts_by",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_pivot",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_refs",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_setprop",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_show_help",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_fromtag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_glob",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_ival",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_jointag",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_query",
"synapse/tests/test_lib_storm.py::StormTest::test_storm_tag_totag",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_behavior_negatives",
"synapse/tests/test_lib_storm.py::LimitTest::test_limit_default"
]
| []
| Apache License 2.0 | 1,485 | [
"synapse/lib/storm.py"
]
| [
"synapse/lib/storm.py"
]
|