| repo (stringclasses, 856 values) | pull_number (int64, 3 to 127k) | instance_id (stringlengths 12 to 58) | issue_numbers (sequencelengths 1 to 5) | base_commit (stringlengths 40) | patch (stringlengths 67 to 1.54M) | test_patch (stringlengths 0 to 107M) | problem_statement (stringlengths 3 to 307k) | hints_text (stringlengths 0 to 908k) | created_at (timestamp[s]) |
---|---|---|---|---|---|---|---|---|---|
scikit-hep/pyhf | 1,610 | scikit-hep__pyhf-1610 | [
"1426"
] | 02b195158d2e3fe25aec17f72ef3c28fd2af176d | diff --git a/src/pyhf/infer/calculators.py b/src/pyhf/infer/calculators.py
--- a/src/pyhf/infer/calculators.py
+++ b/src/pyhf/infer/calculators.py
@@ -667,7 +667,16 @@ def __init__(
def distributions(self, poi_test, track_progress=None):
"""
- Probability Distributions of the test statistic value under the signal + background and background-only hypothesis.
+ Probability distributions of the test statistic value under the signal + background and background-only hypotheses.
+
+ These distributions are produced by generating pseudo-data ("toys")
+ with the nuisance parameters set to their conditional maximum likelihood
+ estimators at the corresponding value of the parameter of interest for
+ each hypothesis, following the joint recommendations of the ATLAS and CMS
+ experiments in |LHC Higgs search combination procedure|_.
+
+ .. _LHC Higgs search combination procedure: https://inspirehep.net/literature/1196797
+ .. |LHC Higgs search combination procedure| replace:: *Procedure for the LHC Higgs boson search combination in Summer 2011*
Example:
@@ -686,7 +695,7 @@ def distributions(self, poi_test, track_progress=None):
... )
>>> sig_plus_bkg_dist, bkg_dist = toy_calculator.distributions(mu_test)
>>> sig_plus_bkg_dist.pvalue(mu_test), bkg_dist.pvalue(mu_test)
- (array(0.14), array(0.76))
+ (array(0.14), array(0.79))
Args:
poi_test (:obj:`float` or :obj:`tensor`): The value for the parameter of interest.
@@ -699,14 +708,26 @@ def distributions(self, poi_test, track_progress=None):
tensorlib, _ = get_backend()
sample_shape = (self.ntoys,)
- signal_pars = self.pdf.config.suggested_init()
- signal_pars[self.pdf.config.poi_index] = poi_test
- signal_pdf = self.pdf.make_pdf(tensorlib.astensor(signal_pars))
+ signal_pars = fixed_poi_fit(
+ poi_test,
+ self.data,
+ self.pdf,
+ self.init_pars,
+ self.par_bounds,
+ self.fixed_params,
+ )
+ signal_pdf = self.pdf.make_pdf(signal_pars)
signal_sample = signal_pdf.sample(sample_shape)
- bkg_pars = self.pdf.config.suggested_init()
- bkg_pars[self.pdf.config.poi_index] = 1.0 if self.test_stat == 'q0' else 0.0
- bkg_pdf = self.pdf.make_pdf(tensorlib.astensor(bkg_pars))
+ bkg_pars = fixed_poi_fit(
+ 1.0 if self.test_stat == 'q0' else 0.0,
+ self.data,
+ self.pdf,
+ self.init_pars,
+ self.par_bounds,
+ self.fixed_params,
+ )
+ bkg_pdf = self.pdf.make_pdf(bkg_pars)
bkg_sample = bkg_pdf.sample(sample_shape)
teststat_func = utils.get_test_stat(self.test_stat)
@@ -774,7 +795,7 @@ def pvalues(self, teststat, sig_plus_bkg_distribution, bkg_only_distribution):
>>> sig_plus_bkg_dist, bkg_dist = toy_calculator.distributions(mu_test)
>>> CLsb, CLb, CLs = toy_calculator.pvalues(q_tilde, sig_plus_bkg_dist, bkg_dist)
>>> CLsb, CLb, CLs
- (array(0.01), array(0.41), array(0.02439024))
+ (array(0.03), array(0.37), array(0.08108108))
Args:
teststat (:obj:`tensor`): The test statistic.
@@ -820,7 +841,7 @@ def expected_pvalues(self, sig_plus_bkg_distribution, bkg_only_distribution):
>>> sig_plus_bkg_dist, bkg_dist = toy_calculator.distributions(mu_test)
>>> CLsb_exp_band, CLb_exp_band, CLs_exp_band = toy_calculator.expected_pvalues(sig_plus_bkg_dist, bkg_dist)
>>> CLs_exp_band
- [array(0.), array(0.), array(0.06186224), array(0.28450033), array(1.)]
+ [array(0.), array(0.), array(0.08403955), array(0.21892596), array(0.86072977)]
Args:
sig_plus_bkg_distribution (~pyhf.infer.calculators.EmpiricalDistribution):
diff --git a/src/pyhf/infer/utils.py b/src/pyhf/infer/utils.py
--- a/src/pyhf/infer/utils.py
+++ b/src/pyhf/infer/utils.py
@@ -57,7 +57,7 @@ def create_calculator(calctype, *args, **kwargs):
... )
>>> qmu_sig, qmu_bkg = toy_calculator.distributions(mu_test)
>>> qmu_sig.pvalue(mu_test), qmu_bkg.pvalue(mu_test)
- (array(0.14), array(0.76))
+ (array(0.14), array(0.79))
Args:
calctype (:obj:`str`): The calculator to create. Choose either
| diff --git a/tests/test_infer.py b/tests/test_infer.py
--- a/tests/test_infer.py
+++ b/tests/test_infer.py
@@ -365,30 +365,30 @@ def test_toy_calculator(tmpdir, hypotest_args):
assert qtilde_mu_sig.samples.tolist() == pytest.approx(
[
0.0,
- 0.13298492825293806,
- 0.0,
- 0.7718560148925349,
- 1.814884694401428,
+ 0.017350013494649374,
0.0,
+ 0.2338008822475217,
+ 0.020328779776718875,
+ 0.8911134903562186,
+ 0.04408274703718007,
0.0,
+ 0.03977591672014569,
0.0,
- 0.0,
- 0.06586643485326249,
],
1e-07,
)
assert qtilde_mu_bkg.samples.tolist() == pytest.approx(
[
- 2.2664625749100082,
- 1.081660887453154,
- 2.7570218408936853,
- 1.3835691388297846,
- 0.4707467005909507,
- 0.0,
- 3.7166483705294127,
- 3.8021896732709592,
- 5.114135391143066,
- 1.3511153731000718,
+ 5.642956861215396,
+ 0.37581364290284114,
+ 4.875367689039649,
+ 3.4299006094989295,
+ 1.0161021805475343,
+ 0.03345317321810626,
+ 0.21984803001140563,
+ 1.274869119189077,
+ 9.368264062021098,
+ 3.0716486684082156,
],
1e-07,
)
diff --git a/tests/test_validation.py b/tests/test_validation.py
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -125,8 +125,8 @@ def expected_result_1bin_shapesys_q0():
@pytest.fixture(scope='module')
def expected_result_1bin_shapesys_q0_toys():
expected_result = {
- "exp": [0.0, 0.0005, 0.0145, 0.1205, 0.402761],
- "obs": 0.005,
+ "exp": [0.0, 0.0, 0.0135, 0.1365, 0.39854497],
+ "obs": 0.004,
}
return expected_result
| Option to change the NP values for toys
# Description
Hi all! I'm trying to run some toys (thank you so much for implementing it by the way!) and I was wondering if it would be possible to let the user choose which values the nuisance parameters are fixed to for the sampling part of the toy generation. I'm referring to
https://github.com/scikit-hep/pyhf/blob/681b806919603dd47a7209f1127abaa5484a2eef/src/pyhf/infer/calculators.py#L702
This is motivated by the fact that I was looking through the ATLAS frequentist recommendations [here](https://indico.cern.ch/event/126652/contributions/1343592/attachments/80222/115004/Frequentist_Limit_Recommendation.pdf) and I wanted to test if there was any difference using the conditional MLE values instead of the initial expected NP values. Would it be possible to implement this as an option for the user?
Thanks to Giordon for some discussions and clarification on this issue.
| > This is motivated by the fact that I was looking through the ATLAS frequentist recommendations [here](https://indico.cern.ch/event/126652/contributions/1343592/attachments/80222/115004/Frequentist_Limit_Recommendation.pdf) and I wanted to test if there was any difference using the conditional MLE values instead of the initial expected NP values. Would it be possible to implement this as an option for the user?
The decision to perform [the test statistic calculations](https://scikit-hep.org/pyhf/api.html#test-statistics) using model parameters determined from the expectation or a fit of the model to data is ultimately that — a decision / choice. At the moment this is abstracted away to an implementation decision, but I think it seems reasonable to expose this choice through a kwarg. @lukasheinrich @kratsg you'd agree?
I'm going to argue that this isn't a choice and that the current implementation is just wrong. In other words, that this is a bug.
`Model.config.suggested_init()` is not an expectation in any sense; if it happens to align with expected values, this is by coincidence in the particular model. The initial parameter values don't have any intrinsic special significance, and can be effectively nonsense in the case of many data-driven backgrounds. The only conditions for the initial parameters are that they need to be within the allowed range and that they are sufficient for a minimum NLL fit to converge. Even in the standard RooStats implementation for toys, [`FrequentistCalculator`](https://root.cern.ch/doc/master/classRooStats_1_1FrequentistCalculator.html), it's clearly stated that "the nuisance parameters are fixed to their MLEs." If you want to get pre-fit expected values, you should be passing in `Model.expected_data(pars)` with the set of "expected" parameter values. Otherwise, by already having data to pass to `ToyCalculator`, you are implicitly constraining the nuisance parameters, so that constraint should be taken into account.
Perhaps most importantly, without setting the NPs to their MLEs, the results from `ToyCalculator` can be completely different from the results of `AsymptoticCalculator` even in the large statistics limit, which seems contradictory to the idea of an asymptotic approximation... By analogy to the asymptotic case, this would be like not using the Asimov data set to get the expected test statistic and instead generating `Model.expected_data()` with the initial parameters. | 2021-09-24T23:09:45 |
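A minimal sketch of the conditional-MLE toy generation described in the discussion above, using only public `pyhf` API. The model, observed counts, and number of toys are illustrative choices, not values from this record.

```python
import pyhf

# Illustrative model and data (not taken from the record above)
model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
data = [51, 48] + model.config.auxdata
mu_test = 1.0

# Fix the POI and fit the nuisance parameters to the observed data, so the
# toys are generated at the conditional MLEs rather than at suggested_init()
signal_pars = pyhf.infer.mle.fixed_poi_fit(mu_test, data, model)
toys = model.make_pdf(signal_pars).sample((100,))  # 100 pseudo-experiments
```

This is the same idea the patch applies inside `ToyCalculator.distributions` for both the signal + background and background-only hypotheses.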
scikit-hep/pyhf | 1,613 | scikit-hep__pyhf-1613 | [
"1336"
] | 54b44adfbc0d942dda9f1cd4726a85d25b5ef384 | diff --git a/src/pyhf/infer/intervals.py b/src/pyhf/infer/intervals.py
--- a/src/pyhf/infer/intervals.py
+++ b/src/pyhf/infer/intervals.py
@@ -15,7 +15,7 @@ def _interp(x, xp, fp):
return tb.astensor(np.interp(x, xp.tolist(), fp.tolist()))
-def upperlimit(data, model, scan, level=0.05, return_results=False):
+def upperlimit(data, model, scan, level=0.05, return_results=False, **hypotest_kwargs):
"""
Calculate an upper limit interval ``(0, poi_up)`` for a single
Parameter of Interest (POI) using a fixed scan through POI-space.
@@ -44,6 +44,8 @@ def upperlimit(data, model, scan, level=0.05, return_results=False):
scan (:obj:`iterable`): Iterable of POI values.
level (:obj:`float`): The threshold value to evaluate the interpolated results at.
return_results (:obj:`bool`): Whether to return the per-point results.
+ hypotest_kwargs (:obj:`string`): Kwargs for the calls to
+ :class:`~pyhf.infer.hypotest` to configure the fits.
Returns:
Tuple of Tensors:
@@ -56,7 +58,7 @@ def upperlimit(data, model, scan, level=0.05, return_results=False):
"""
tb, _ = get_backend()
results = [
- hypotest(mu, data, model, test_stat="qtilde", return_expected_set=True)
+ hypotest(mu, data, model, return_expected_set=True, **hypotest_kwargs)
for mu in scan
]
obs = tb.astensor([[r[0]] for r in results])
| diff --git a/tests/test_infer.py b/tests/test_infer.py
--- a/tests/test_infer.py
+++ b/tests/test_infer.py
@@ -34,6 +34,22 @@ def test_upperlimit(tmpdir, hypotest_args):
)
+def test_upperlimit_with_kwargs(tmpdir, hypotest_args):
+ """
+ Check that the default return structure of pyhf.infer.hypotest is as expected
+ """
+ _, data, model = hypotest_args
+ results = pyhf.infer.intervals.upperlimit(
+ data, model, scan=np.linspace(0, 5, 11), test_stat="qtilde"
+ )
+ assert len(results) == 2
+ observed_limit, expected_limits = results
+ assert observed_limit == pytest.approx(1.0262704738584554)
+ assert expected_limits == pytest.approx(
+ [0.65765653, 0.87999725, 1.12453992, 1.50243428, 2.09232927]
+ )
+
+
def test_mle_fit_default(tmpdir, hypotest_args):
"""
Check that the default return structure of pyhf.infer.mle.fit is as expected
| pass calculator options through `pyhf.infer.upperlimit` (toys)
# Description
Currently there's no easy way to pass custom options to `upperlimit`, so it'll always call asymptotics.
| @lukasheinrich I think this is a duplicate of Issue #1326, so I'm going to close in favor of that Issue. If you meant something more general feel free to reopen.
This actually hits on toys vs. asymptotics, which is slightly different. A PR could hit both this and Issue #1326. | 2021-09-30T08:20:57 |
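A hedged usage sketch of the pass-through added by the patch above; the model, scan grid, and `ntoys` value are illustrative.

```python
import numpy as np

import pyhf

model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
data = [51, 48] + model.config.auxdata

# hypotest kwargs are now forwarded, so the calculator can be switched to toys
observed_limit, expected_limits = pyhf.infer.intervals.upperlimit(
    data, model, scan=np.linspace(0, 5, 11), calctype="toybased", ntoys=200
)
```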
scikit-hep/pyhf | 1,615 | scikit-hep__pyhf-1615 | [
"969"
] | a8d34462b762172ef6fe794708847de755d5e80d | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -65,6 +65,7 @@ def setup(app):
"bib/use_citations.bib",
"bib/general_citations.bib",
]
+bibtex_default_style = "unsrt"
# external links
xref_links = {"arXiv:1007.1727": ("[1007.1727]", "https://arxiv.org/abs/1007.1727")}
| docs: Enable chronological ordering of talks and tutorials on website
# Description
At the moment, the [docs webpage](https://scikit-hep.org/pyhf/outreach.html) does not list the talks and tutorials in reverse chronological order. This makes it hard for someone new to find the most recent and relevant information on the page. If the page can be rendered in reverse chronological order, that would be very useful.
| Surprisingly non-trivial: https://sphinxcontrib-bibtex.readthedocs.io/en/latest/usage.html#custom-formatting-sorting-and-labelling
There is not enough documentation, so one has to dig through the source code or just use trial and error. | 2021-10-01T08:34:34
|
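A minimal `docs/conf.py` sketch of the configuration the patch above adds. The extension list and `.bib` file names here are placeholders rather than the project's full configuration; with the `unsrt` style, entries render in the order they appear in the `.bib` files, so keeping those files in reverse chronological order carries through to the rendered page.

```python
# docs/conf.py (sketch)
extensions = ["sphinxcontrib.bibtex"]
bibtex_bibfiles = ["bib/talks.bib", "bib/tutorials.bib"]  # placeholder file names
bibtex_default_style = "unsrt"  # render entries in the order they are listed
```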
scikit-hep/pyhf | 1,670 | scikit-hep__pyhf-1670 | [
"1658"
] | c3d0acb2591f34fc3bd169fb525fa1f6b4c8f369 | diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py
--- a/src/pyhf/simplemodels.py
+++ b/src/pyhf/simplemodels.py
@@ -151,20 +151,3 @@ def _deprecated_api_warning(
DeprecationWarning,
stacklevel=3, # Raise to user level
)
-
-
-def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):
- """
- .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`
- instead.
-
- .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in
- ``pyhf`` ``v0.7.0``.
- """
- _deprecated_api_warning(
- "pyhf.simplemodels.hepdata_like",
- "pyhf.simplemodels.uncorrelated_background",
- "0.6.2",
- "0.7.0",
- )
- return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)
| diff --git a/tests/test_simplemodels.py b/tests/test_simplemodels.py
--- a/tests/test_simplemodels.py
+++ b/tests/test_simplemodels.py
@@ -1,5 +1,3 @@
-import warnings
-
import pytest
import pyhf
@@ -76,17 +74,3 @@ def test_uncorrelated_background_default_backend(default_backend):
'uncorr_bkguncrt[1]',
]
assert model.config.suggested_init() == [1.0, 1.0, 1.0]
-
-
-# TODO: Remove when pyhf.simplemodels.hepdata_like is removed in pyhf v0.7.0
-def test_deprecated_apis():
- with warnings.catch_warnings(record=True) as _warning:
- # Cause all warnings to always be triggered
- warnings.simplefilter("always")
- pyhf.simplemodels.hepdata_like([12.0, 11.0], [50.0, 52.0], [3.0, 7.0])
- assert len(_warning) == 1
- assert issubclass(_warning[-1].category, DeprecationWarning)
- assert (
- "pyhf.simplemodels.hepdata_like is deprecated in favor of pyhf.simplemodels.uncorrelated_background"
- in str(_warning[-1].message)
- )
| Remove `pyhf.simplemodels.hepdata_like` from public API for v0.7.0
### Summary
[`pyhf.simplemodels.hepdata_like`](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.simplemodels.hepdata_like.html#pyhf.simplemodels.hepdata_like) has been scheduled for removal from the public API in release `v0.7.0`. As `v0.7.0` will be the next release (and hopefully soon) this should get removed now.
### Additional Information
`pyhf.simplemodels.hepdata_like` has been deprecated since `v0.6.2`.
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| @matthewfeickert , can I take this one?
> can I take this one?
Go for it. :+1: | 2021-10-25T09:57:53 |
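For reference, the replacement API that remains after the removal; the numbers are the same illustrative ones used in the deleted deprecation test.

```python
import pyhf

# pyhf.simplemodels.uncorrelated_background replaces the removed hepdata_like
model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
print(model.config.suggested_init())  # [1.0, 1.0, 1.0]
```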
scikit-hep/pyhf | 1,673 | scikit-hep__pyhf-1673 | [
"1491"
] | fbdce47f4aa8b9da60977480ac3f109c169346d1 | diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -53,7 +53,18 @@ def download(archive_url, output_directory, force=False, compress=False):
+ "To download an archive from this host use the --force option."
)
- with requests.get(archive_url) as response:
+ # c.f. https://github.com/scikit-hep/pyhf/issues/1491
+ # > Use content negotiation at the landing page for the resource that
+ # > the DOI resolves to. DataCite content negotiation is forwarding all
+ # > requests with unknown content types to the URL registered in the
+ # > handle system.
+ # c.f. https://blog.datacite.org/changes-to-doi-content-negotiation/
+ # The HEPData landing page for the resource file can check if the Accept
+ # request HTTP header matches the content type of the resource file and
+ # return the content directly if so.
+ with requests.get(
+ archive_url, headers={"Accept": "application/x-tar"}
+ ) as response:
if compress:
with open(output_directory, "wb") as archive:
archive.write(response.content)
| Pass `Accept` header in `contrib.utils.download`
I'm copying a comment here that I made in the [HEPData Zulip chat](https://hepdata.zulipchat.com/#narrow/stream/226203-pyhf/topic/DOIs/near/213610306) on 16th October 2020.
Regarding the issue (HEPData/hepdata#162) to mint DOIs for all local resource files attached to a submission, if we do eventually get around to addressing it, we would probably redirect the DOI to a landing page for the resource file, rather than to the resource file itself (e.g. the pyhf tarball). This would follow the DataCite [Best Practices for DOI Landing Pages](https://support.datacite.org/docs/landing-pages), e.g. "[DOIs should resolve to a landing page, not directly to the content](https://support.datacite.org/docs/landing-pages#dois-should-resolve-to-a-landing-page-not-directly-to-the-content)", which I'm currently breaking for the two manually minted DOIs. In the issue (HEPdata/hepdata#162) I mentioned the possibility of using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) to redirect to the resource file itself, but the linked page now says "Custom content types are no longer supported since January 1st, 2020". I thought maybe content negotiation could be used to return the `.tar.gz` file directly, but the intended purpose is to retrieve DOI metadata in different formats, not to provide the content itself. In anticipation of possible future changes, I'd recommend that you use the URL directly rather than the DOI in pyhf download scripts and documentation (e.g. revert #1109).
| Thanks @GraemeWatt. This is important, so we're quite happy you're bringing this (back) to our attention.
So, from my understanding of what you've shown here, the [recommendation from DataCite](https://support.datacite.org/docs/landing-pages) is that "DOIs should resolve to a landing page, not directly to the content", that "The DOI should be appropriately tagged (so that machines can read it)", and that one "can retrieve additional information about the item that might not be easily retrievable from the item itself." But as you've said that there's no way to get access to the actual data products associated with that particular DOI, I guess I'm not clear on what purpose the DOI has if it is just the metadata.
In the section [The landing page should provide a way to access the item](https://support.datacite.org/docs/landing-pages#the-landing-page-should-provide-a-way-to-access-the-item)
> Humans should be able to reach the item being described from the landing page. If the item has been removed, retracted, or otherwise made unavailable to the public on purpose, the landing page should serve as a "tombstone page", providing enough information that the item can be identified and confirmed to have existed at one time.
only makes explicit mention of humans as opposed to humans and machines. So does this mean that DOIs are becoming human use only and that accessing a data product associated with a DOI is necessarily a two step process (get the DOI and then from the DOI landing page the the data product download URL)?
I am perhaps missing something obvious about all of this. If so, if you have an explicit example that would be great to see.
Hey @mfenner - can you help here?
I think it should be possible to programatically query the DOI and get the location of the underlying object, then fetch it.
Is this correct? Is there any code available that demonstrates this?
Just wanted to follow up on this if @mfenner has time for input. Any thoughts here are appreciated!
Unfortunately DOIs routinely point to landing pages and not the content, as mentioned in the comments above. There are a number of reasons why this makes sense, e.g. access restrictions and different file formats, but that makes automated machine access very hard. A new DOI metadata field `contentURL` is therefore on the list of improvements planned for the next DataCite metadata schema, planned to be released in 12-18 months.
Metadata are specific to each DOI registration agency, so these things might work slightly differently for Crossref or any of the other DOI registration agencies.
If schema.org metadata are available (via the landing page), one can use the `contentURL` property of schema.org.
I've been investigating three options to directly return content (i.e. the `pyhf` tarball) from the DOI after we mint DOIs for local resource files with URLs directing to a landing page rather than the resource file itself (see HEPData/hepdata#162).
1. Following the suggestion of @mfenner, we could embed Schema.org metadata on the HEPData landing page for the resource file in JSON-LD format (see HEPData/hepdata#145) including a [`contentUrl`](https://schema.org/contentUrl) property. One problem is that doing `curl -LH "Accept: application/vnd.schemaorg.ld+json" https://doi.org/10.17182/hepdata.89408.v1/r2` or `curl -LH "Accept: application/ld+json" https://doi.org/10.17182/hepdata.89408.v1/r2` returns JSON-LD from DataCite (without `contentUrl`) using [DataCite Content Negotiation](https://support.datacite.org/docs/datacite-content-resolver) before getting to the HEPData server. I think we would need to introduce a custom metadata content type like `curl -LH "Accept: application/vnd.hepdata.ld+json" https://doi.org/10.17182/hepdata.89408.v1/r2` to return the JSON-LD from the HEPData landing page. The `pyhf` code would then parse the `contentUrl` and make the download in another request.
2. DataCite offers a [media API](https://support.datacite.org/reference/media) where custom content types can be registered and then later retrieved via a public [REST API](https://support.datacite.org/docs/api), although content negotiation is no longer supported. However, it should be possible to retrieve the metadata via, for example, https://api.datacite.org/dois/10.17182/hepdata.89408.v1/r2 and then parse the `media` to find the registered URL of the content for a specific media type like `application/x-tar`. I tried to test the DataCite media API by registering a custom content type for one DOI, but it doesn't seem to be working. I reported the problems I found to DataCite support, but I don't think the media API option is worth pursuing further.
3. A [2019 blog article](https://blog.datacite.org/changes-to-doi-content-negotiation/) by @mfenner mentions an alternative option to "_use content negotiation at the landing page for the resource that the DOI resolves to. DataCite content negotiation is forwarding all requests with unknown content types to the URL registered in the handle system._" This seems like the simplest option for the `pyhf` use case. The HEPData landing page for the resource file can check if the [`Accept`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept) request HTTP header matches the content type of the resource file and return the content directly if so, for example, `curl -LH "Accept: application/x-tar" https://doi.org/10.17182/hepdata.89408.v1/r2`. In the `pyhf` Python code, you'd just need to replace this line:
https://github.com/scikit-hep/pyhf/blob/260315d2930b38258ad4c0718b0274c9eca2e6d4/src/pyhf/contrib/utils.py#L56
with:
```python
with requests.get(archive_url, headers={'Accept': 'application/x-tar'}) as response:
```
Some other suggestions for improvements to this code:
* Check the `response.status_code` and return an error message if not OK.
* Use [`tarfile.is_tarfile`](https://docs.python.org/3/library/tarfile.html#tarfile.is_tarfile) to check that `response.content` is actually a tarball and return an error message if not.
* Remove `mode="r|gz"` or replace it with `mode="r"` or `mode="r:*"` for reading with transparent compression, so that the code works also with uncompressed tarballs (see #1111 and #1519), where the media type is still `application/x-tar`.
* Maybe add an option to download a zipfile instead of a tarball (see #1519), then you'd need `headers={'Accept': 'application/zip'}` in the request and [`zipfile.is_zipfile`](https://docs.python.org/3/library/zipfile.html#zipfile.is_zipfile) to check the response content. You could use the Python [`zipfile`](https://docs.python.org/3/library/zipfile.html) module to unpack, but maybe easier to use [`shutil.unpack_archive`](https://docs.python.org/3/library/shutil.html#shutil.unpack_archive) for both tarballs and zipfiles.
Making these changes should not break the functionality with the current situation (where https://doi.org/10.17182/hepdata.89408.v1/r2 returns the tarball directly). I'd therefore recommend you make them ASAP before the next `pyhf` release. After we redirect the DOI to the landing page, probably in the next few weeks, the DOI will return the HTML landing page instead of the tarball unless the request contains the `Accept: application/x-tar` header.
I agree with your analysis. The DataCite media API was deprecated as it doesn't really fit into the outlined model. And content negotiation for `application/ld+json` unfortunately triggers the DataCite content negotiation.
(Very nice analysis). One slight concern I have with this is that the HistFactory JSON should not be treated as the only kind of JSON-like item that would be uploaded to HEPData -- is this taking to account a way to request a particular item as such, or would this be downloading all JSON items in a record?
@kratsg, it seems you misunderstood, so let me try to clarify. Solutions 1. to 3. [above](https://github.com/scikit-hep/pyhf/issues/1491#issuecomment-951810848) are ways to download a resource file (e.g. a `pyhf` tarball) from HEPData given the DOI. Solution 2. doesn't work, so you should concentrate on solution 3. which is simpler than solution 1.
The [Schema.org](https://schema.org) [JSON-LD](https://json-ld.org) referred to in solution 1. is a way of embedding metadata in a web page so it can be indexed by search engines (see "[Understand how structured data works](https://developers.google.com/search/docs/advanced/structured-data/intro-structured-data)" from Google). This has **nothing to do** with the `pyhf` JSON format (apart from obviously being JSON-based)! Until we had solution 3., the proposal in solution 1. was that we could add a field `contentUrl` to the embedded metadata, then you could retrieve the JSON-LD from the landing page for the resource file to find the download link given the DOI. But you don't need to worry about this now that solution 3. has been developed. We'll still make solution 1. available as it might be helpful for other use cases, it enables indexing by search engines, and it is an [open issue from 2018](https://github.com/HEPData/hepdata/issues/145) to upgrade to JSON-LD from the older [Microdata](https://en.wikipedia.org/wiki/Microdata_(HTML)) format currently embedded in HEPData web pages.
> 3. A [2019 blog article](https://blog.datacite.org/changes-to-doi-content-negotiation/) by @mfenner mentions an alternative option to "_use content negotiation at the landing page for the resource that the DOI resolves to. DataCite content negotiation is forwarding all requests with unknown content types to the URL registered in the handle system._" This seems like the simplest option for the `pyhf` use case. The HEPData landing page for the resource file can check if the [`Accept`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept) request HTTP header matches the content type of the resource file and return the content directly if so, for example, `curl -LH "Accept: application/x-tar" https://doi.org/10.17182/hepdata.89408.v1/r2`. In the `pyhf` Python code, you'd just need to replace this line:
> https://github.com/scikit-hep/pyhf/blob/260315d2930b38258ad4c0718b0274c9eca2e6d4/src/pyhf/contrib/utils.py#L56
>
> with:
>
> ```python
> with requests.get(archive_url, headers={'Accept': 'application/x-tar'}) as response:
> ```
Thanks for this excellent analysis and summary @GraemeWatt — truly appreciated! :rocket: I'll get this in right away and then we can make additional improvements.
> Some other suggestions for improvements to this code:
>
> * Check the `response.status_code` and return an error message if not OK.
> * Use [`tarfile.is_tarfile`](https://docs.python.org/3/library/tarfile.html#tarfile.is_tarfile) to check that `response.content` is actually a tarball and return an error message if not.
> * Remove `mode="r|gz"` or replace it with `mode="r"` or `mode="r:*"` for reading with transparent compression, so that the code works also with uncompressed tarballs (see [Extend pyhf contrib download to allow for uncompressed targets #1111](https://github.com/scikit-hep/pyhf/issues/1111) and [Make `pyhf contrib download` be able to handle multiple compression types #1519](https://github.com/scikit-hep/pyhf/issues/1519)), where the media type is still `application/x-tar`.
> * Maybe add an option to download a zipfile instead of a tarball (see [Make `pyhf contrib download` be able to handle multiple compression types #1519](https://github.com/scikit-hep/pyhf/issues/1519)), then you'd need `headers={'Accept': 'application/zip'}` in the request and [`zipfile.is_zipfile`](https://docs.python.org/3/library/zipfile.html#zipfile.is_zipfile) to check the response content. You could use the Python [`zipfile`](https://docs.python.org/3/library/zipfile.html) module to unpack, but maybe easier to use [`shutil.unpack_archive`](https://docs.python.org/3/library/shutil.html#shutil.unpack_archive) for both tarballs and zipfiles.
>
> Making these changes should not break the functionality with the current situation (where https://doi.org/10.17182/hepdata.89408.v1/r2 returns the tarball directly). I'd therefore recommend you make them ASAP before the next `pyhf` release. After we redirect the DOI to the landing page, probably in the next few weeks, the DOI will return the HTML landing page instead of the tarball unless the request contains the `Accept: application/x-tar` header.
These are all excellent as well. I'll make these a new issue for `v0.7.0` that refactors the internals. | 2021-10-26T18:42:23 |
|
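A standalone sketch of option 3 from the discussion above, outside of `pyhf` itself; the output directory name is arbitrary.

```python
import tarfile
from io import BytesIO

import requests

# Ask the DOI for the tarball directly: DataCite forwards the unknown content
# type to the HEPData landing page, which returns the resource file itself.
doi_url = "https://doi.org/10.17182/hepdata.89408.v1/r2"
response = requests.get(doi_url, headers={"Accept": "application/x-tar"})
response.raise_for_status()

# mode="r:*" reads .tar or .tar.gz transparently, as suggested above
with tarfile.open(mode="r:*", fileobj=BytesIO(response.content)) as archive:
    archive.extractall("likelihoods")  # arbitrary output directory
```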
scikit-hep/pyhf | 1,691 | scikit-hep__pyhf-1691 | [
"1690"
] | c493c7c5ad3e5aeda7d022cdf5fe77e9fdab43a2 | diff --git a/src/pyhf/readxml.py b/src/pyhf/readxml.py
--- a/src/pyhf/readxml.py
+++ b/src/pyhf/readxml.py
@@ -59,19 +59,21 @@ def import_root_histogram(rootdir, filename, path, name, filecache=None):
fullpath = str(Path(rootdir).joinpath(filename))
if fullpath not in filecache:
f = uproot.open(fullpath)
- filecache[fullpath] = f
+ keys = set(f.keys(cycle=False))
+ filecache[fullpath] = (f, keys)
else:
- f = filecache[fullpath]
- try:
+ f, keys = filecache[fullpath]
+
+ fullname = "/".join([path, name])
+
+ if name in keys:
hist = f[name]
- except (KeyError, uproot.deserialization.DeserializationError):
- fullname = "/".join([path, name])
- try:
- hist = f[fullname]
- except KeyError:
- raise KeyError(
- f'Both {name} and {fullname} were tried and not found in {fullpath}'
- )
+ elif fullname in keys:
+ hist = f[fullname]
+ else:
+ raise KeyError(
+ f'Both {name} and {fullname} were tried and not found in {fullpath}'
+ )
return hist.to_numpy()[0].tolist(), extract_error(hist)
| diff --git a/tests/test_import.py b/tests/test_import.py
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -201,10 +201,22 @@ def test_import_measurement_gamma_bins(const):
pyhf.readxml.process_measurements(toplvl)
-def test_import_prepHistFactory():
- parsed_xml = pyhf.readxml.parse(
- 'validation/xmlimport_input/config/example.xml', 'validation/xmlimport_input/'
- )
[email protected](
+ "configfile,rootdir",
+ [
+ (
+ 'validation/xmlimport_input/config/example.xml',
+ 'validation/xmlimport_input/',
+ ),
+ (
+ 'validation/xmlimport_input4/config/example.xml',
+ 'validation/xmlimport_input4/',
+ ),
+ ],
+ ids=['xmlimport_input', 'xmlimport_input_histoPath'],
+)
+def test_import_prepHistFactory(configfile, rootdir):
+ parsed_xml = pyhf.readxml.parse(configfile, rootdir)
# build the spec, strictly checks properties included
spec = {
| DeserializationError too frequent when `HistoPath` is specified in HiFa XML
### Summary
When a `<Sample>` specifies the `HistoPath` and it's not empty, we will run into very frequent deserialization errors from `uproot` which is very slow to raise an exception (see scikit-hep/uproot4#504).
`pyhf` needs to fix this to be a bit smarter in how to check valid keys, and in particular, fix up its logic to not hit a/rely on `DeserializationError`.
### OS / Environment
```console
$ system_profiler -detailLevel mini SPSoftwareDataType | head -n 6
Software:
System Software Overview:
System Version: macOS 10.14.6 (18G9323)
Kernel Version: Darwin 18.7.0
```
### Steps to Reproduce
See #1687 for the fundamental issue. It's reproducible using private workspaces for now, but can be confirmed reproducible.
```
$ time pyhf xml2json monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/NormalMeasurement.xml --basedir monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7
Processing ./NormalMeasurement_CR_0LBoosted_ttbar_cuts.xml: 0%| | 0/1 [00:00<?, ?channel/smonotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/results/monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/Exclusion_combined_NormalMeasurement_model.root not in filecache
path=CR_0LBoosted_ttbar_cuts_hists/data, name=hData_CR_0LBoosted_ttbar_obs_cuts
deserialization error, trying fullname=CR_0LBoosted_ttbar_cuts_hists/data/hData_CR_0LBoosted_ttbar_obs_cuts instead
monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/results/monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/Exclusion_combined_NormalMeasurement_model.root in filecache
path=CR_0LBoosted_ttbar_cuts_hists/Top0LBoosted, name=hTop0LBoostedNom_CR_0LBoosted_ttbar_obs_cuts
deserialization error, trying fullname=CR_0LBoosted_ttbar_cuts_hists/Top0LBoosted/hTop0LBoostedNom_CR_0LBoosted_ttbar_obs_cuts instead
monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/results/monotop_twmetComb0L1LBoosted_allCRs_normDStoDR_unblind_sigTheo_envelope__pmoder_sig_a250_DM10_H900_tb1_st0p7/Exclusion_combined_NormalMeasurement_model.root in filecache
path=CR_0LBoosted_ttbar_cuts_hists/Top0LBoosted, name=hTop0LBoostedEG_EffLow_CR_0LBoosted_ttbar_obs_cutsNorm
deserialization error, trying fullname=CR_0LBoosted_ttbar_cuts_hists/Top0LBoosted/hTop0LBoostedEG_EffLow_CR_0LBoosted_ttbar_obs_cutsNorm instead
```
### File Upload (optional)
_No response_
### Expected Results
`pyhf xml2json` should be fast.
### Actual Results
```console
`pyhf xml2json` is slow.
```
### pyhf Version
```console
This impacts all pyhf versions up to 0.6.4.
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2021-11-10T17:21:18 |
|
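The core of the fix as a hedged standalone sketch; the ROOT file name and histogram names below are made up for illustration.

```python
import uproot

# Check key membership up front instead of relying on a slow
# DeserializationError from a failed lookup
with uproot.open("histograms.root") as root_file:  # hypothetical file
    keys = set(root_file.keys(cycle=False))

    name = "hData_CR_obs_cuts"          # hypothetical histogram name
    fullname = "CR_hists/data/" + name  # hypothetical HistoPath + name

    if name in keys:
        hist = root_file[name]
    elif fullname in keys:
        hist = root_file[fullname]
    else:
        raise KeyError(f"Both {name} and {fullname} were tried and not found")
```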
scikit-hep/pyhf | 1,692 | scikit-hep__pyhf-1692 | [
"1684"
] | 39a9e920c3a902c683aa182d920ba451224276c0 | diff --git a/src/pyhf/workspace.py b/src/pyhf/workspace.py
--- a/src/pyhf/workspace.py
+++ b/src/pyhf/workspace.py
@@ -485,11 +485,11 @@ def _prune_and_rename(
corresponding `observation`.
Args:
- prune_modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.
- prune_modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.
- prune_samples: A :obj:`str` or a :obj:`list` of samples to prune.
- prune_channels: A :obj:`str` or a :obj:`list` of channels to prune.
- prune_measurements: A :obj:`str` or a :obj:`list` of measurements to prune.
+ prune_modifiers: A :obj:`list` of modifiers to prune.
+ prune_modifier_types: A :obj:`list` of modifier types to prune.
+ prune_samples: A :obj:`list` of samples to prune.
+ prune_channels: A :obj:`list` of channels to prune.
+ prune_measurements: A :obj:`list` of measurements to prune.
rename_modifiers: A :obj:`dict` mapping old modifier name to new modifier name.
rename_samples: A :obj:`dict` mapping old sample name to new sample name.
rename_channels: A :obj:`dict` mapping old channel name to new channel name.
@@ -622,11 +622,11 @@ def prune(
The pruned workspace must also be a valid workspace.
Args:
- modifiers: A :obj:`str` or a :obj:`list` of modifiers to prune.
- modifier_types: A :obj:`str` or a :obj:`list` of modifier types to prune.
- samples: A :obj:`str` or a :obj:`list` of samples to prune.
- channels: A :obj:`str` or a :obj:`list` of channels to prune.
- measurements: A :obj:`str` or a :obj:`list` of measurements to prune.
+ modifiers: A :obj:`list` of modifiers to prune.
+ modifier_types: A :obj:`list` of modifier types to prune.
+ samples: A :obj:`list` of samples to prune.
+ channels: A :obj:`list` of channels to prune.
+ measurements: A :obj:`list` of measurements to prune.
Returns:
~pyhf.workspace.Workspace: A new workspace object with the specified components removed
| `Workspace.prune` only accepts lists
### Summary
The [docstring for `Workspace.prune`](https://github.com/scikit-hep/pyhf/blob/c493c7c5ad3e5aeda7d022cdf5fe77e9fdab43a2/src/pyhf/workspace.py#L624-L629) signals support for `str` arguments, but currently only lists are supported (tested for modifier types / samples / channels). This happens since `_prune_and_rename` iterates over its arguments without enforcing conversion to lists first. Happy to provide a MR to fix this behavior if desired, or update the docstrings to require list arguments.
### OS / Environment
```console
n/a
```
### Steps to Reproduce
<!--- Paste your minimal failing Python example code between the quotes below -->
```python (paste below)
import pyhf
spec = {
"channels": [
{
"name": "SR",
"samples": [
{"data": [10.0], "name": "Signal", "modifiers": []},
{
"data": [112.0],
"modifiers": [
{
"data": {"hi_data": [67.0], "lo_data": [158.0]},
"name": "Modeling",
"type": "histosys"
}
],
"name": "Background"
}
]
}
],
"measurements": [
{"config": {"parameters": [], "poi": ""}, "name": "minimal_example"}
],
"observations": [
{"data": [120.0], "name": "SR"}
],
"version": "1.0.0"
}
ws = pyhf.Workspace(spec)
ws.prune(modifier_types=["histosys"]) # works fine
ws.prune(modifier_types="histosys") # pyhf.exceptions.InvalidWorkspaceOperation
ws.prune(samples="Background") # pyhf.exceptions.InvalidWorkspaceOperation
ws.prune(channels="SR") # pyhf.exceptions.InvalidWorkspaceOperation
```
### File Upload (optional)
_No response_
### Expected Results
No exceptions raised.
### Actual Results
```console
Traceback (most recent call last):
File "test.py", line 34, in <module>
ws.prune(modifier_types="histosys") # pyhf.exceptions.InvalidWorkspaceOperation
File "[...]/pyhf/src/pyhf/workspace.py", line 638, in prune
return self._prune_and_rename(
File "[...]/pyhf/src/pyhf/workspace.py", line 513, in _prune_and_rename
raise exceptions.InvalidWorkspaceOperation(
pyhf.exceptions.InvalidWorkspaceOperation: h is not one of the modifier types in this workspace.
```
### pyhf Version
```console
0.6.4.dev38
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2021-11-11T14:50:04 |
||
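Given the clarified docstrings, a single name is simply wrapped in a list. Continuing from the `ws` built in the reproducer above:

```python
# All pruning arguments are lists of names
pruned = ws.prune(modifier_types=["histosys"])
pruned = ws.prune(samples=["Background"])
```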
scikit-hep/pyhf | 1,697 | scikit-hep__pyhf-1697 | [
"1486"
] | e4331091a123615ecc91bf9be988d799368c705d | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,6 +32,7 @@
'pytest>=6.0',
'pytest-cov>=2.5.1',
'pytest-mock',
+ 'requests-mock>=1.9.0',
'pytest-benchmark[histogram]',
'pytest-console-scripts',
'pytest-mpl',
diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -1,9 +1,13 @@
"""Helper utilities for common tasks."""
-from urllib.parse import urlparse
+import logging
import tarfile
+import zipfile
from io import BytesIO
-import logging
+from pathlib import Path
+from shutil import rmtree
+from urllib.parse import urlparse
+
from pyhf import exceptions
log = logging.getLogger(__name__)
@@ -62,17 +66,66 @@ def download(archive_url, output_directory, force=False, compress=False):
# The HEPData landing page for the resource file can check if the Accept
# request HTTP header matches the content type of the resource file and
# return the content directly if so.
+ # TODO: Figure out how to accept headers of both application/x-tar and
+ # application/zip.
with requests.get(
archive_url, headers={"Accept": "application/x-tar"}
) as response:
+ if response.status_code != 200:
+ raise exceptions.InvalidArchive(
+ f"{archive_url} gives a response code of {response.status_code}.\n"
+ + "There is either something temporarily wrong with the archive host"
+ + f" or {archive_url} is an invalid URL."
+ )
+
if compress:
with open(output_directory, "wb") as archive:
archive.write(response.content)
else:
- with tarfile.open(
- mode="r|gz", fileobj=BytesIO(response.content)
- ) as archive:
- archive.extractall(output_directory)
+ # Support for file-like objects for tarfile.is_tarfile was added
+ # in Python 3.9, so as pyhf is currently Python 3.7+ then can't
+ # do tarfile.is_tarfile(BytesIO(response.content)).
+ # Instead, just use a 'try except' block to determine if the
+ # archive is a valid tarfile.
+ # TODO: Simplify after pyhf is Python 3.9+ only
+ try:
+ # Use transparent compression to allow for .tar or .tar.gz
+ with tarfile.open(
+ mode="r:*", fileobj=BytesIO(response.content)
+ ) as archive:
+ archive.extractall(output_directory)
+ except tarfile.ReadError:
+ if not zipfile.is_zipfile(BytesIO(response.content)):
+ raise exceptions.InvalidArchive(
+ f"The archive downloaded from {archive_url} is not a tarfile"
+ + " or a zipfile and so can not be opened as one."
+ )
+
+ output_directory = Path(output_directory)
+ if output_directory.exists():
+ rmtree(output_directory)
+ with zipfile.ZipFile(BytesIO(response.content)) as archive:
+ archive.extractall(output_directory)
+
+ # zipfile.ZipFile.extractall extracts to a directory
+ # below a target directory, so to match the extraction
+ # path of tarfile.TarFile.extractall move the extracted
+ # directory to a temporary path and then replace the
+ # output directory target with the contents at the
+ # temporary path.
+ # The directory is moved instead of being extracted one
+ # directory up and then renamed as the name of the
+ # zipfile directory is set at zipfile creation time and
+ # isn't knowable in advance.
+ child_path = [child for child in output_directory.iterdir()][0]
+ _tmp_path = output_directory.parent.joinpath(
+ Path(output_directory.name + "__tmp__")
+ )
+ child_path.replace(_tmp_path)
+ # the zipfile could contain remnant __MACOSX directories
+ # from creation time
+ rmtree(output_directory)
+ _tmp_path.replace(output_directory)
except ModuleNotFoundError:
diff --git a/src/pyhf/exceptions/__init__.py b/src/pyhf/exceptions/__init__.py
--- a/src/pyhf/exceptions/__init__.py
+++ b/src/pyhf/exceptions/__init__.py
@@ -70,6 +70,10 @@ class InvalidArchiveHost(Exception):
"""InvalidArchiveHost is raised when a given patchset archive url is not an approved host."""
+class InvalidArchive(Exception):
+ """InvalidArchive is raised when a given patchset archive url does not return a valid response."""
+
+
class InvalidPatchSet(Exception):
"""InvalidPatchSet is raised when a given patchset object does not have the right configuration, even though it validates correctly against the schema."""
| diff --git a/tests/contrib/test_utils.py b/tests/contrib/test_utils.py
new file mode 100644
--- /dev/null
+++ b/tests/contrib/test_utils.py
@@ -0,0 +1,99 @@
+import tarfile
+import zipfile
+from pathlib import Path
+from shutil import rmtree
+
+import pytest
+
+from pyhf.contrib.utils import download
+from pyhf.exceptions import InvalidArchive, InvalidArchiveHost
+
+
[email protected](scope="function")
+def tarfile_path(tmpdir):
+ with open(tmpdir.join("test_file.txt").strpath, "w") as write_file:
+ write_file.write("test file")
+ with tarfile.open(tmpdir.join("test_tar.tar.gz").strpath, mode="w:gz") as archive:
+ archive.add(tmpdir.join("test_file.txt").strpath)
+ return Path(tmpdir.join("test_tar.tar.gz").strpath)
+
+
[email protected](scope="function")
+def tarfile_uncompressed_path(tmpdir):
+ with open(tmpdir.join("test_file.txt").strpath, "w") as write_file:
+ write_file.write("test file")
+ with tarfile.open(tmpdir.join("test_tar.tar").strpath, mode="w") as archive:
+ archive.add(tmpdir.join("test_file.txt").strpath)
+ return Path(tmpdir.join("test_tar.tar").strpath)
+
+
[email protected](scope="function")
+def zipfile_path(tmpdir):
+ with open(tmpdir.join("test_file.txt").strpath, "w") as write_file:
+ write_file.write("test file")
+ with zipfile.ZipFile(tmpdir.join("test_zip.zip").strpath, "w") as archive:
+ archive.write(tmpdir.join("test_file.txt").strpath)
+ return Path(tmpdir.join("test_zip.zip").strpath)
+
+
+def test_download_untrusted_archive_host(tmpdir, requests_mock):
+ archive_url = "https://www.pyhfthisdoesnotexist.org"
+ requests_mock.get(archive_url)
+
+ with pytest.raises(InvalidArchiveHost):
+ download(archive_url, tmpdir.join("likelihoods").strpath)
+
+
+def test_download_invalid_archive(tmpdir, requests_mock):
+ archive_url = "https://www.hepdata.net/record/resource/1408476?view=true"
+ requests_mock.get(archive_url, status_code=404)
+
+ with pytest.raises(InvalidArchive):
+ download(archive_url, tmpdir.join("likelihoods").strpath)
+
+
+def test_download_compress(tmpdir, requests_mock):
+ archive_url = "https://www.hepdata.net/record/resource/1408476?view=true"
+ requests_mock.get(archive_url)
+
+ download(archive_url, tmpdir.join("likelihoods").strpath, compress=True)
+
+
+def test_download_archive_type(
+ tmpdir, mocker, requests_mock, tarfile_path, tarfile_uncompressed_path, zipfile_path
+):
+ archive_url = "https://www.hepdata.net/record/resource/1408476?view=true"
+ output_directory = tmpdir.join("likelihoods").strpath
+ # Give BytesIO a tarfile
+ requests_mock.get(archive_url, content=open(tarfile_path, "rb").read())
+ download(archive_url, output_directory)
+
+ # Give BytesIO an uncompressed tarfile
+ requests_mock.get(archive_url, content=open(tarfile_uncompressed_path, "rb").read())
+ download(archive_url, output_directory)
+
+ # Give BytesIO a zipfile
+ requests_mock.get(archive_url, content=open(zipfile_path, "rb").read())
+ # Run without and with existing output_directory to cover both
+ # cases of the shutil.rmtree logic
+ rmtree(Path(output_directory))
+ download(archive_url, output_directory) # without
+ download(archive_url, output_directory) # with
+
+ # Give BytesIO a zipfile (using same requests_mock as previous) but have
+ # zipfile.is_zipfile reject it
+ mocker.patch("zipfile.is_zipfile", return_value=False)
+ with pytest.raises(InvalidArchive):
+ download(archive_url, output_directory)
+
+
+def test_download_archive_force(tmpdir, requests_mock, tarfile_path):
+ archive_url = "https://www.cern.ch/record/resource/123456789"
+ requests_mock.get(
+ archive_url, content=open(tarfile_path, "rb").read(), status_code=200
+ )
+
+ with pytest.raises(InvalidArchiveHost):
+ download(archive_url, tmpdir.join("likelihoods").strpath, force=False)
+
+ download(archive_url, tmpdir.join("likelihoods").strpath, force=True)
diff --git a/tests/test_scripts.py b/tests/test_scripts.py
--- a/tests/test_scripts.py
+++ b/tests/test_scripts.py
@@ -1,14 +1,26 @@
import json
+import logging
import shlex
-import pyhf
-import time
import sys
-import logging
+import tarfile
+import time
+from importlib import import_module, reload
+from pathlib import Path
+from unittest import mock
+
import pytest
from click.testing import CliRunner
-from unittest import mock
-from importlib import reload
-from importlib import import_module
+
+import pyhf
+
+
[email protected](scope="function")
+def tarfile_path(tmpdir):
+ with open(tmpdir.join("test_file.txt").strpath, "w") as write_file:
+ write_file.write("test file")
+ with tarfile.open(tmpdir.join("test_tar.tar.gz").strpath, mode="w:gz") as archive:
+ archive.add(tmpdir.join("test_file.txt").strpath)
+ return Path(tmpdir.join("test_tar.tar.gz").strpath)
def test_version(script_runner):
@@ -273,9 +285,10 @@ def test_testpoi(tmpdir, script_runner):
results_exp.append(d['CLs_exp'])
results_obs.append(d['CLs_obs'])
- import numpy as np
import itertools
+ import numpy as np
+
for pair in itertools.combinations(results_exp, r=2):
assert not np.array_equal(*pair)
@@ -543,7 +556,8 @@ def test_workspace_digest(tmpdir, script_runner, algorithms, do_json):
"https://doi.org/10.17182/hepdata.89408.v1/r2",
],
)
-def test_patchset_download(tmpdir, script_runner, archive):
+def test_patchset_download(tmpdir, script_runner, requests_mock, tarfile_path, archive):
+ requests_mock.get(archive, content=open(tarfile_path, "rb").read())
command = f'pyhf contrib download {archive} {tmpdir.join("likelihoods").strpath}'
ret = script_runner.run(*shlex.split(command))
assert ret.success
@@ -553,6 +567,9 @@ def test_patchset_download(tmpdir, script_runner, archive):
ret = script_runner.run(*shlex.split(command))
assert ret.success
+ requests_mock.get(
+ "https://www.pyhfthisdoesnotexist.org/record/resource/1234567", status_code=200
+ )
command = f'pyhf contrib download --verbose https://www.pyhfthisdoesnotexist.org/record/resource/1234567 {tmpdir.join("likelihoods").strpath}'
ret = script_runner.run(*shlex.split(command))
assert not ret.success
@@ -560,11 +577,15 @@ def test_patchset_download(tmpdir, script_runner, archive):
"pyhf.exceptions.InvalidArchiveHost: www.pyhfthisdoesnotexist.org is not an approved archive host"
in ret.stderr
)
- # Force a download from a real URL, but one that doesn't have an existing file
+
+ # httpstat.us is a real wesite that can be used for testing responses
+ requests_mock.get(
+ "https://httpstat.us/404/record/resource/1234567", status_code=404
+ )
command = f'pyhf contrib download --verbose --force https://httpstat.us/404/record/resource/1234567 {tmpdir.join("likelihoods").strpath}'
ret = script_runner.run(*shlex.split(command))
assert not ret.success
- assert "tarfile.ReadError: not a gzip file" in ret.stderr
+ assert "gives a response code of 404" in ret.stderr
def test_missing_contrib_extra(caplog):
Make download tests robust to HEPData outages by mocking tar.gz files
@kratsg is pointing out that we should probably have these mocked so that the test suite doesn't rely on HEPData behavior.
An example of something similar he's done recently:
```python
def test_user_expires_reauthenticate(user_temp, requests_mock, mocker):
user, _ = user_temp
assert user.is_authenticated()
assert user.is_expired() == False
assert user.expires_in > 0
user._id_token['exp'] = time.time() - 1
assert user.is_authenticated()
assert user.is_expired()
assert user.expires_in == 0
mock = mocker.patch.object(user, '_parse_id_token')
requests_mock.post(
requests.compat.urljoin(itkdb.settings.ITKDB_AUTH_URL, 'grantToken'),
text=json.dumps(
{
'id_token': {
'exp': time.time() + 3600,
'name': _name,
'uuidentity': _identity,
},
'access_token': _access_token,
}
),
)
user.authenticate()
user._id_token = {'exp': time.time() + 3600, 'name': _name, 'uuidentity': _identity}
assert user.is_authenticated()
assert user.is_expired() == False
```
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1485#issuecomment-857370600_
| 2021-11-12T23:48:07 |
|
scikit-hep/pyhf | 1,703 | scikit-hep__pyhf-1703 | [
"1630"
] | 8c90450d1ecf2d212d2590541538621f4da8e3ce | diff --git a/src/pyhf/infer/calculators.py b/src/pyhf/infer/calculators.py
--- a/src/pyhf/infer/calculators.py
+++ b/src/pyhf/infer/calculators.py
@@ -48,12 +48,8 @@ def generate_asimov_data(
>>> mu_test = 1.0
>>> pyhf.infer.calculators.generate_asimov_data(mu_test, data, model, None, None, None)
array([ 60.61229858, 56.52802479, 270.06832542, 48.31545488])
-
- It is possible to access the Asimov parameters as well:
-
>>> pyhf.infer.calculators.generate_asimov_data(
- ... mu_test, data, model, None, None, None,
- ... return_fitted_pars = True
+ ... mu_test, data, model, None, None, None, return_fitted_pars=True
... )
(array([ 60.61229858, 56.52802479, 270.06832542, 48.31545488]), array([1. , 0.97224597, 0.87553894]))
@@ -335,8 +331,8 @@ def teststatistic(self, poi_test):
r"""
Compute the test statistic for the observed data under the studied model.
- The fitted parameters of the five fits that are implicitly ran at every call
- of this method are afterwards accessible through ``self.fitted_pars``,
+ The fitted parameters of the five fits that are implicitly run for each call
+ of this method are afterwards accessible through the ``fitted_pars`` attribute,
which is a :py:class:`~pyhf.infer.calculators.HypoTestFitResults` instance.
Example:
@@ -352,15 +348,9 @@ def teststatistic(self, poi_test):
>>> asymptotic_calculator = pyhf.infer.calculators.AsymptoticCalculator(data, model, test_stat="qtilde")
>>> asymptotic_calculator.teststatistic(mu_test)
array(0.14043184)
-
- Access the best-fit parameters afterwards:
-
>>> asymptotic_calculator.fitted_pars
HypoTestFitResults(asimov_pars=array([0. , 1.0030482 , 0.96264534]), free_fit_to_data=array([0. , 1.0030512 , 0.96266961]), free_fit_to_asimov=array([0. , 1.00304893, 0.96263365]), fixed_poi_fit_to_data=array([1. , 0.97224597, 0.87553894]), fixed_poi_fit_to_asimov=array([1. , 0.97276864, 0.87142047]))
-
- E.g. the :math:`\hat{\mu}` and :math:`\hat{\theta}` fitted to the asimov dataset:
-
- >>> asymptotic_calculator.fitted_pars.free_fit_to_asimov
+ >>> asymptotic_calculator.fitted_pars.free_fit_to_asimov # best-fit parameters to Asimov dataset
array([0. , 1.00304893, 0.96263365])
Args:
| diff --git a/src/pyhf/infer/test_statistics.py b/src/pyhf/infer/test_statistics.py
--- a/src/pyhf/infer/test_statistics.py
+++ b/src/pyhf/infer/test_statistics.py
@@ -95,12 +95,13 @@ def qmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
>>> par_bounds = model.config.suggested_bounds()
>>> par_bounds[model.config.poi_index] = [-10.0, 10.0]
>>> fixed_params = model.config.suggested_fixed()
- >>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds, fixed_params)
+ >>> pyhf.infer.test_statistics.qmu(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params
+ ... )
array(3.9549891)
-
- Access the best-fit parameter tensors:
-
- >>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars = True)
+ >>> pyhf.infer.test_statistics.qmu(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
+ ... )
(array(3.9549891), (array([1. , 0.97224597, 0.87553894]), array([-0.06679525, 1.00555369, 0.96930896])))
Args:
@@ -188,12 +189,13 @@ def qmu_tilde(
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> fixed_params = model.config.suggested_fixed()
- >>> pyhf.infer.test_statistics.qmu_tilde(test_mu, data, model, init_pars, par_bounds, fixed_params)
+ >>> pyhf.infer.test_statistics.qmu_tilde(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params
+ ... )
array(3.93824492)
-
- Access the best-fit parameter tensors:
-
- >>> pyhf.infer.test_statistics.qmu_tilde(test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars = True)
+ >>> pyhf.infer.test_statistics.qmu_tilde(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
+ ... )
(array(3.93824492), (array([1. , 0.97224597, 0.87553894]), array([0. , 1.0030512 , 0.96266961])))
Args:
@@ -215,8 +217,9 @@ def qmu_tilde(
- The calculated test statistic, :math:`\tilde{q}_{\mu}`
- - The parameter tensors corresponding to the constrained and unconstrained best fit,
- :math:`\mu, \hat{\hat{\theta}}` and :math:`\hat{\mu}, \hat{\theta}`.
+ - The parameter tensors corresponding to the constrained best fit,
+ :math:`\mu, \hat{\hat{\theta}}`, and the unconstrained best fit,
+ :math:`\hat{\mu}, \hat{\theta}`.
Only returned if ``return_fitted_pars`` is ``True``.
"""
if pdf.config.poi_index is None:
@@ -268,12 +271,13 @@ def tmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
>>> par_bounds = model.config.suggested_bounds()
>>> par_bounds[model.config.poi_index] = [-10.0, 10.0]
>>> fixed_params = model.config.suggested_fixed()
- >>> pyhf.infer.test_statistics.tmu(test_mu, data, model, init_pars, par_bounds, fixed_params)
+ >>> pyhf.infer.test_statistics.tmu(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params
+ ... )
array(3.9549891)
-
- Access the best-fit parameter tensors:
-
- >>> pyhf.infer.test_statistics.tmu(test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars = True)
+ >>> pyhf.infer.test_statistics.tmu(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
+ ... )
(array(3.9549891), (array([1. , 0.97224597, 0.87553894]), array([-0.06679525, 1.00555369, 0.96930896])))
Args:
@@ -295,8 +299,9 @@ def tmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
- The calculated test statistic, :math:`t_{\mu}`
- - The parameter tensors corresponding to the constrained and unconstrained best fit,
- :math:`\mu, \hat{\hat{\theta}}` and :math:`\hat{\mu}, \hat{\theta}`.
+ - The parameter tensors corresponding to the constrained best fit,
+ :math:`\mu, \hat{\hat{\theta}}`, and the unconstrained best fit,
+ :math:`\hat{\mu}, \hat{\theta}`.
Only returned if ``return_fitted_pars`` is ``True``.
"""
if pdf.config.poi_index is None:
@@ -356,12 +361,13 @@ def tmu_tilde(
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> fixed_params = model.config.suggested_fixed()
- >>> pyhf.infer.test_statistics.tmu_tilde(test_mu, data, model, init_pars, par_bounds, fixed_params)
+ >>> pyhf.infer.test_statistics.tmu_tilde(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params
+ ... )
array(3.93824492)
-
- Access the best-fit parameter tensors:
-
- >>> pyhf.infer.test_statistics.tmu_tilde(test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars = True)
+ >>> pyhf.infer.test_statistics.tmu_tilde(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
+ ... )
(array(3.93824492), (array([1. , 0.97224597, 0.87553894]), array([0. , 1.0030512 , 0.96266961])))
Args:
@@ -383,8 +389,9 @@ def tmu_tilde(
- The calculated test statistic, :math:`\tilde{t}_{\mu}`
- - The parameter tensors corresponding to the constrained and unconstrained best fit,
- :math:`\mu, \hat{\hat{\theta}}` and :math:`\hat{\mu}, \hat{\theta}`.
+ - The parameter tensors corresponding to the constrained best fit,
+ :math:`\mu, \hat{\hat{\theta}}`, and the unconstrained best fit,
+ :math:`\hat{\mu}, \hat{\theta}`.
Only returned if ``return_fitted_pars`` is ``True``.
"""
if pdf.config.poi_index is None:
@@ -436,10 +443,9 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
>>> fixed_params = model.config.suggested_fixed()
>>> pyhf.infer.test_statistics.q0(test_mu, data, model, init_pars, par_bounds, fixed_params)
array(2.98339447)
-
- Access the best-fit parameter tensors:
-
- >>> pyhf.infer.test_statistics.q0(test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars = True)
+ >>> pyhf.infer.test_statistics.q0(
+ ... test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
+ ... )
(array(2.98339447), (array([0. , 1.03050845, 1.12128752]), array([0.95260667, 0.99635345, 1.02140172])))
Args:
@@ -461,8 +467,9 @@ def q0(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=Fa
- The calculated test statistic, :math:`q_{0}`
- - The parameter tensors corresponding to the constrained and unconstrained best fit,
- :math:`\mu, \hat{\hat{\theta}}` and :math:`\hat{\mu}, \hat{\theta}`.
+ - The parameter tensors corresponding to the constrained best fit,
+ :math:`\mu, \hat{\hat{\theta}}`, and the unconstrained best fit,
+ :math:`\hat{\mu}, \hat{\theta}`.
Only returned if ``return_fitted_pars`` is ``True``.
"""
| Ensure docstring examples are contiguous
PR #1554 introduced some docstring examples that are not contiguous and so break the usefulness of the copy button. These are good examples, and so should be turned into fully copyable examples.
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/pull/1554#discussion_r727640891_
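For illustration, the reworked `qmu` example from the diff above now reads as one uninterrupted doctest block; interleaving prose between `>>>` lines is what previously split it into separate, only partially copyable snippets (setup lines for `model`, `data`, etc. are elided here; see the full docstring in the diff):

```python
>>> pyhf.infer.test_statistics.qmu(
...     test_mu, data, model, init_pars, par_bounds, fixed_params
... )
array(3.9549891)
>>> pyhf.infer.test_statistics.qmu(
...     test_mu, data, model, init_pars, par_bounds, fixed_params, return_fitted_pars=True
... )
(array(3.9549891), (array([1. , 0.97224597, 0.87553894]), array([-0.06679525, 1.00555369, 0.96930896])))
```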
| 2021-11-17T06:28:14 |
|
scikit-hep/pyhf | 1,706 | scikit-hep__pyhf-1706 | [
"1701"
] | f25b8ddc6ab34d089954eb4898d54220f7f2defa | diff --git a/src/pyhf/modifiers/staterror.py b/src/pyhf/modifiers/staterror.py
--- a/src/pyhf/modifiers/staterror.py
+++ b/src/pyhf/modifiers/staterror.py
@@ -1,4 +1,5 @@
import logging
+from typing import List
import pyhf
from pyhf import events
@@ -8,7 +9,7 @@
log = logging.getLogger(__name__)
-def required_parset(sigmas, fixed):
+def required_parset(sigmas, fixed: List[bool]):
n_parameters = len(sigmas)
return {
'paramset_type': 'constrained_by_normal',
@@ -104,7 +105,8 @@ def finalize(self):
for modifier_data in self.builder_data[modname].values():
modifier_data['data']['mask'] = masks[modname]
sigmas = relerrs[masks[modname]]
- fixed = [s == 0 for s in sigmas]
+ # list of bools, consistent with other modifiers (no numpy.bool_)
+ fixed = default_backend.tolist(sigmas == 0)
# ensures non-Nan constraint term, but in a future PR we need to remove constraints for these
sigmas[fixed] = 1.0
self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])
diff --git a/src/pyhf/parameters/paramsets.py b/src/pyhf/parameters/paramsets.py
--- a/src/pyhf/parameters/paramsets.py
+++ b/src/pyhf/parameters/paramsets.py
@@ -1,3 +1,5 @@
+from typing import List
+
import pyhf
__all__ = [
@@ -27,13 +29,13 @@ def __init__(self, **kwargs):
)
@property
- def suggested_fixed(self):
+ def suggested_fixed(self) -> List[bool]:
if type(self._suggested_fixed) == bool:
return [self._suggested_fixed] * self.n_parameters
return self._suggested_fixed
@property
- def suggested_fixed_as_bool(self):
+ def suggested_fixed_as_bool(self) -> bool:
'''compresses list of same-value bools into single bool'''
suggested_fixed = self.suggested_fixed
first = suggested_fixed[0]
diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -2,6 +2,7 @@
import copy
import logging
+from typing import List
import pyhf.parameters
import pyhf
@@ -346,7 +347,7 @@ def param_set(self, name):
"""
return self.par_map[name]['paramset']
- def suggested_fixed(self):
+ def suggested_fixed(self) -> List[bool]:
"""
Identify the fixed parameters in the model.
| diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
--- a/tests/test_modifiers.py
+++ b/tests/test_modifiers.py
@@ -91,12 +91,24 @@ def test_staterror_holes():
True,
False,
]
+ assert all(
+ [
+ isinstance(fixed, bool)
+ for fixed in model.config.param_set("staterror_1").suggested_fixed
+ ]
+ )
assert model.config.param_set("staterror_2").suggested_fixed == [
False,
True,
False,
False,
]
+ assert all(
+ [
+ isinstance(fixed, bool)
+ for fixed in model.config.param_set("staterror_2").suggested_fixed
+ ]
+ )
assert (factors[1][0, 0, 0, :] == [2.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0]).all()
assert (factors[1][1, 0, 0, :] == [1.0, 1.0, 1.0, 1.0, 4.0, 1.0, 5.0, 6.0]).all()
| Revert slight change in return types of `suggested_fixed`
### Summary
https://github.com/scikit-hep/pyhf/pull/1639 very slightly changed the return type of `model.config.suggested_fixed()`. It used to be `List[bool]`, but now some elements can be `numpy.bool_`, which behaves differently (see https://github.com/numpy/numpy/issues/9646):
```python
>>> import numpy as np
>>> isinstance(np.bool_(True), bool)
False
```
This makes typing / type checking in libraries using `pyhf` more complicated (spotted via https://github.com/scikit-hep/cabinetry/pull/301). Would it be possible to consistently return `List[bool]` again instead?
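For reference, the same distinction reproduced with plain NumPy, together with the conversion that restores builtin `bool` elements (the patch above applies this via the default backend's `tolist`); this is a standalone illustration, not pyhf code:

```python
import numpy as np

sigmas = np.asarray([0.0, 2.0, 0.0])
fixed = sigmas == 0                      # elementwise comparison -> numpy.bool_ entries
print(isinstance(fixed[0], bool))        # False
print(all(isinstance(x, bool) for x in fixed.tolist()))  # True: tolist() yields builtin bools
```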
### Additional Information
The example below shows how `numpy.bool_` entries appear for some, but not all models.
```python
import pyhf
spec = {
"channels": [
{
"name": "SR",
"samples": [
{
"data": [25, 5],
"modifiers": [
{
"data": [5, 2],
"name": "staterror_SR",
"type": "staterror",
},
],
"name": "Signal",
}
],
},
],
"measurements": [{"config": {"parameters": [], "poi": ""}, "name": "fit"}],
"observations": [
{"data": [35, 8], "name": "SR"},
],
"version": "1.0.0",
}
def check_types(model):
for i, par in enumerate(model.config.suggested_fixed()):
if not isinstance(par, bool):
print(f"{model.config.par_names()[i]} has type {type(par)}")
model = pyhf.simplemodels.correlated_background([5], [10], [11], [9])
print("checking correlated_background model")
check_types(model)
model = pyhf.Workspace(spec).model()
print("checking custom model")
check_types(model)
```
Output for 9fbbbf9740175fcdfe73fedc4351953be18d2664 and later (including current `master`):
```
checking correlated_background model
checking custom model
staterror_SR[0] has type <class 'numpy.bool_'>
staterror_SR[1] has type <class 'numpy.bool_'>
```
Output for 5ea4e0a7751aa446c286b9697e01a9439622ebb5:
```
checking correlated_background model
checking custom model
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| This only seems to affect `staterror` modifiers. `shapesys.required_parset` uses a slightly different way to construct the information and returns a list of bools. `shapefactor.required_parset` does not seem to support per-bin settings at the moment and is also not affected, as are the remaining modifier types that don't have per-bin settings.
The `staterror` behavior can be changed to return `List[bool]` again at a few different levels, I will push a PR with a suggestion and some comments in case you'd like to have the old behavior restored. | 2021-11-18T12:23:03 |
scikit-hep/pyhf | 1,708 | scikit-hep__pyhf-1708 | [
"1216"
] | d072da0c0d3083db6b154ffc064b789cedfdb301 | diff --git a/src/pyhf/modifiers/histosys.py b/src/pyhf/modifiers/histosys.py
--- a/src/pyhf/modifiers/histosys.py
+++ b/src/pyhf/modifiers/histosys.py
@@ -1,10 +1,10 @@
import logging
import pyhf
-from pyhf import events
-from pyhf.tensor.manager import get_backend
-from pyhf import interpolators
+from pyhf import events, interpolators
+from pyhf.exceptions import InvalidModifier
from pyhf.parameters import ParamViewer
+from pyhf.tensor.manager import get_backend
log = logging.getLogger(__name__)
@@ -61,8 +61,8 @@ def append(self, key, channel, sample, thismod, defined_samp):
def finalize(self):
default_backend = pyhf.default_backend
- for modifier in self.builder_data.values():
- for sample in modifier.values():
+ for modifier_name, modifier in self.builder_data.items():
+ for sample_name, sample in modifier.items():
sample["data"]["mask"] = default_backend.concatenate(
sample["data"]["mask"]
)
@@ -75,6 +75,21 @@ def finalize(self):
sample["data"]["nom_data"] = default_backend.concatenate(
sample["data"]["nom_data"]
)
+ if (
+ not len(sample["data"]["nom_data"])
+ == len(sample["data"]["lo_data"])
+ == len(sample["data"]["hi_data"])
+ ):
+ _modifier_type, _modifier_name = modifier_name.split("/")
+ _sample_data_len = len(sample["data"]["nom_data"])
+ _lo_data_len = len(sample["data"]["lo_data"])
+ _hi_data_len = len(sample["data"]["hi_data"])
+ raise InvalidModifier(
+ f"The '{sample_name}' sample {_modifier_type} modifier"
+ + f" '{_modifier_name}' has data shape inconsistent with the sample.\n"
+ + f"{sample_name} has 'data' of length {_sample_data_len} but {_modifier_name}"
+ + f" has 'lo_data' of length {_lo_data_len} and 'hi_data' of length {_hi_data_len}."
+ )
return self.builder_data
diff --git a/src/pyhf/modifiers/shapesys.py b/src/pyhf/modifiers/shapesys.py
--- a/src/pyhf/modifiers/shapesys.py
+++ b/src/pyhf/modifiers/shapesys.py
@@ -2,8 +2,9 @@
import pyhf
from pyhf import events
-from pyhf.tensor.manager import get_backend
+from pyhf.exceptions import InvalidModifier
from pyhf.parameters import ParamViewer
+from pyhf.tensor.manager import get_backend
log = logging.getLogger(__name__)
@@ -70,8 +71,8 @@ def append(self, key, channel, sample, thismod, defined_samp):
def finalize(self):
default_backend = pyhf.default_backend
- for modifier in self.builder_data.values():
- for sample in modifier.values():
+ for modifier_name, modifier in self.builder_data.items():
+ for sample_name, sample in modifier.items():
sample["data"]["mask"] = default_backend.concatenate(
sample["data"]["mask"]
)
@@ -81,6 +82,16 @@ def finalize(self):
sample["data"]["nom_data"] = default_backend.concatenate(
sample["data"]["nom_data"]
)
+ if len(sample["data"]["nom_data"]) != len(sample["data"]["uncrt"]):
+ _modifier_type, _modifier_name = modifier_name.split("/")
+ _sample_data_len = len(sample["data"]["nom_data"])
+ _uncrt_len = len(sample["data"]["uncrt"])
+ raise InvalidModifier(
+ f"The '{sample_name}' sample {_modifier_type} modifier"
+ + f" '{_modifier_name}' has data shape inconsistent with the sample.\n"
+ + f"{sample_name} has 'data' of length {_sample_data_len} but {_modifier_name}"
+ + f" has 'data' of length {_uncrt_len}."
+ )
return self.builder_data
diff --git a/src/pyhf/modifiers/staterror.py b/src/pyhf/modifiers/staterror.py
--- a/src/pyhf/modifiers/staterror.py
+++ b/src/pyhf/modifiers/staterror.py
@@ -3,8 +3,9 @@
import pyhf
from pyhf import events
-from pyhf.tensor.manager import get_backend
+from pyhf.exceptions import InvalidModifier
from pyhf.parameters import ParamViewer
+from pyhf.tensor.manager import get_backend
log = logging.getLogger(__name__)
@@ -54,8 +55,8 @@ def append(self, key, channel, sample, thismod, defined_samp):
def finalize(self):
default_backend = pyhf.default_backend
- for modifier in self.builder_data.values():
- for sample in modifier.values():
+ for modifier_name, modifier in self.builder_data.items():
+ for sample_name, sample in modifier.items():
sample["data"]["mask"] = default_backend.concatenate(
sample["data"]["mask"]
)
@@ -65,6 +66,16 @@ def finalize(self):
sample["data"]["nom_data"] = default_backend.concatenate(
sample["data"]["nom_data"]
)
+ if len(sample["data"]["nom_data"]) != len(sample["data"]["uncrt"]):
+ _modifier_type, _modifier_name = modifier_name.split("/")
+ _sample_data_len = len(sample["data"]["nom_data"])
+ _uncrt_len = len(sample["data"]["uncrt"])
+ raise InvalidModifier(
+ f"The '{sample_name}' sample {_modifier_type} modifier"
+ + f" '{_modifier_name}' has data shape inconsistent with the sample.\n"
+ + f"{sample_name} has 'data' of length {_sample_data_len} but {_modifier_name}"
+ + f" has 'data' of length {_uncrt_len}."
+ )
for modname in self.builder_data.keys():
parname = modname.split('/')[1]
| diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
--- a/tests/test_modifiers.py
+++ b/tests/test_modifiers.py
@@ -1,15 +1,10 @@
-import pyhf
+import json
+
import numpy
+import pytest
+from jsonpatch import JsonPatch
-modifiers_to_test = [
- "histosys",
- "normfactor",
- "normsys",
- "shapefactor",
- "shapesys",
- "staterror",
-]
-modifier_pdf_types = ["normal", None, "normal", None, "poisson", "normal"]
+import pyhf
def test_shapefactor_build():
@@ -174,3 +169,27 @@ def test_shapesys_holes():
True,
False,
]
+
+
[email protected](
+ "patch_file",
+ [
+ "bad_histosys_modifier_patch.json",
+ "bad_shapesys_modifier_patch.json",
+ "bad_staterror_modifier_patch.json",
+ ],
+)
+def test_invalid_bin_wise_modifier(datadir, patch_file):
+ """
+ Test that bin-wise modifiers will raise an exception if their data shape
+ differs from their sample's.
+ """
+ spec = json.load(open(datadir.join("spec.json")))
+
+ assert pyhf.Model(spec)
+
+ patch = JsonPatch.from_string(open(datadir.join(patch_file)).read())
+ bad_spec = patch.apply(spec)
+
+ with pytest.raises(pyhf.exceptions.InvalidModifier):
+ pyhf.Model(bad_spec)
diff --git a/tests/test_modifiers/bad_histosys_modifier_patch.json b/tests/test_modifiers/bad_histosys_modifier_patch.json
new file mode 100644
--- /dev/null
+++ b/tests/test_modifiers/bad_histosys_modifier_patch.json
@@ -0,0 +1,24 @@
+[
+ {
+ "op": "add",
+ "path": "/channels/0/samples/1/modifiers",
+ "value": [
+ {
+ "name": "histosys_bad",
+ "type": "histosys",
+ "data": {
+ "hi_data": [
+ 3,
+ 6,
+ 9
+ ],
+ "lo_data": [
+ 1,
+ 2,
+ 3
+ ]
+ }
+ }
+ ]
+ }
+]
diff --git a/tests/test_modifiers/bad_shapesys_modifier_patch.json b/tests/test_modifiers/bad_shapesys_modifier_patch.json
new file mode 100644
--- /dev/null
+++ b/tests/test_modifiers/bad_shapesys_modifier_patch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "op": "add",
+ "path": "/channels/0/samples/1/modifiers",
+ "value": [
+ {
+ "name": "shapesys_bad",
+ "type": "shapesys",
+ "data": [
+ 1,
+ 2,
+ 3
+ ]
+ }
+ ]
+ }
+]
diff --git a/tests/test_modifiers/bad_staterror_modifier_patch.json b/tests/test_modifiers/bad_staterror_modifier_patch.json
new file mode 100644
--- /dev/null
+++ b/tests/test_modifiers/bad_staterror_modifier_patch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "op": "add",
+ "path": "/channels/0/samples/1/modifiers",
+ "value": [
+ {
+ "name": "staterror_bad",
+ "type": "staterror",
+ "data": [
+ 1,
+ 2,
+ 3
+ ]
+ }
+ ]
+ }
+]
diff --git a/tests/test_modifiers/spec.json b/tests/test_modifiers/spec.json
new file mode 100644
--- /dev/null
+++ b/tests/test_modifiers/spec.json
@@ -0,0 +1,35 @@
+{
+ "channels": [
+ {
+ "name": "channel_1",
+ "samples": [
+ {
+ "name": "sample_1",
+ "data": [
+ 1,
+ 2,
+ 3,
+ 4
+ ],
+ "modifiers": [
+ {
+ "name": "mu",
+ "type": "normfactor",
+ "data": null
+ }
+ ]
+ },
+ {
+ "name": "sample_2",
+ "data": [
+ 2,
+ 4,
+ 6,
+ 8
+ ],
+ "modifiers": []
+ }
+ ]
+ }
+ ]
+}
| better error message when systematic has wrong number of bins for the channel
# Description
It's good that we can catch samples with wrong number of bins for a channel relatively early. We should catch this earlier too for systematics on the samples. Example here has a systematic with 3 bins for a channel with 4 bins:
```json
{
"channels": [
{
"name": "SR",
"samples": [
{
"data": [
1,
2,
3,
4
],
"modifiers": [
{
"data": null,
"name": "SigXsecOverSM",
"type": "normfactor"
}
],
"name": "signal"
},
{
"data": [
2,
4,
6,
8
],
"modifiers": [
{
"data": {
"hi_data": [
3,
6,
9
],
"lo_data": [
1,
2,
3
]
},
"name": "syst_wrongbins",
"type": "histosys"
}
],
"name": "WtZ"
}
]
}
],
"measurements": [
{
"config": {
"parameters": [
{
"bounds": [
[
-10.0,
10.0
]
],
"fixed": true,
"inits": [
1.0
],
"name": "SigXsecOverSM"
}
],
"poi": "SigXsecOverSM"
},
"name": "NormalMeasurement"
}
],
"observations": [
{
"data": [
2.0,
2.0,
2.0,
2.0
],
"name": "SR"
}
],
"version": "1.0.0"
}
```
When running `pyhf cls` on this (or even just loading the workspace), the traceback is fairly long and the crash happens deep inside the combined histosys modifier code, further along than we would like:
```
Traceback (most recent call last):
File "/Users/kratsg/.pyenv/versions/pyhf-dev/bin/pyhf", line 33, in <module>
sys.exit(load_entry_point('pyhf', 'console_scripts', 'pyhf')())
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/kratsg/pyhf/src/pyhf/cli/infer.py", line 198, in cls
model = ws.model(
File "/Users/kratsg/pyhf/src/pyhf/workspace.py", line 412, in model
return Model(modelspec, poi_name=measurement['config']['poi'], **config_kwargs)
File "/Users/kratsg/pyhf/src/pyhf/pdf.py", line 561, in __init__
self.main_model = _MainModel(
File "/Users/kratsg/pyhf/src/pyhf/pdf.py", line 417, in __init__
self.modifiers_appliers = {
File "/Users/kratsg/pyhf/src/pyhf/pdf.py", line 418, in <dictcomp>
k: c(
File "/Users/kratsg/pyhf/src/pyhf/modifiers/histosys.py", line 63, in __init__
if histosys_mods:
File "/Users/kratsg/pyhf/src/pyhf/interpolators/code4p.py", line 22, in __init__
self._histogramssets = default_backend.astensor(histogramssets)
File "/Users/kratsg/pyhf/src/pyhf/tensor/numpy_backend.py", line 214, in astensor
return np.asarray(tensor_in, dtype=dtype)
File "/Users/kratsg/.pyenv/versions/3.8.6/envs/pyhf-dev/lib/python3.8/site-packages/numpy/core/_asarray.py", line 83, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: setting an array element with a sequence.
```
The best thing to do is probably say something like "modifier <name> in <sample> has incompatible structure for <channel>" or something similar.
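A minimal sketch of the kind of per-sample length check the patch above adds to the bin-wise modifier builders (simplified into a hypothetical helper; in the actual patch the check is inlined in each builder's `finalize()` and the message wording differs):

```python
from pyhf.exceptions import InvalidModifier


def check_bin_wise_modifier(sample_name, modifier_name, nom_data, modifier_data):
    # hypothetical helper illustrating the shape check
    if len(modifier_data) != len(nom_data):
        raise InvalidModifier(
            f"The '{sample_name}' sample modifier '{modifier_name}' has data shape"
            f" inconsistent with the sample: {len(nom_data)} bins in the sample but"
            f" {len(modifier_data)} entries in the modifier data."
        )
```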
| I guess we probably want to check the lengths of all the modifier data entries just after model spec creation in
https://github.com/scikit-hep/pyhf/blob/f25b8ddc6ab34d089954eb4898d54220f7f2defa/src/pyhf/workspace.py#L416-L419
Thinking more on this, I guess this only needs to be applied to bin-wise modifiers, so we shouldn't check after model spec creation, but in each relevant modifier itself. | 2021-11-19T00:12:06 |
scikit-hep/pyhf | 1,710 | scikit-hep__pyhf-1710 | [
"1699"
] | 43c156702fd2a0392114c5b2468f0128ed16ea72 | diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -635,14 +635,21 @@ class Model:
"""The main pyhf model class."""
def __init__(
- self, spec, modifier_set=None, batch_size=None, validate=True, **config_kwargs
+ self,
+ spec,
+ modifier_set=None,
+ batch_size=None,
+ validate: bool = True,
+ **config_kwargs,
):
"""
Construct a HistFactory Model.
Args:
spec (:obj:`jsonable`): The HistFactory JSON specification
- batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.
+ batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched)
+ Models to compute.
+ validate (:obj:`bool`): Whether to validate against a JSON schema
config_kwargs: Possible keyword arguments for the model configuration
Returns:
@@ -657,8 +664,8 @@ def __init__(
self.schema = config_kwargs.pop('schema', 'model.json')
self.version = config_kwargs.pop('version', None)
# run jsonschema validation of input specification against the (provided) schema
- log.info(f"Validating spec against schema: {self.schema:s}")
if validate:
+ log.info(f"Validating spec against schema: {self.schema:s}")
utils.validate(self.spec, self.schema, version=self.version)
# build up our representation of the specification
poi_name = config_kwargs.pop('poi_name', 'mu')
diff --git a/src/pyhf/workspace.py b/src/pyhf/workspace.py
--- a/src/pyhf/workspace.py
+++ b/src/pyhf/workspace.py
@@ -286,16 +286,28 @@ class Workspace(_ChannelSummaryMixin, dict):
valid_joins = ['none', 'outer', 'left outer', 'right outer']
- def __init__(self, spec, **config_kwargs):
- """Workspaces hold the model, data and measurements."""
+ def __init__(self, spec, validate: bool = True, **config_kwargs):
+ """
+ Workspaces hold the model, data and measurements.
+
+ Args:
+ spec (:obj:`jsonable`): The HistFactory JSON specification
+ validate (:obj:`bool`): Whether to validate against a JSON schema
+ config_kwargs: Possible keyword arguments for the workspace configuration
+
+ Returns:
+ model (:class:`~pyhf.workspace.Workspace`): The Workspace instance
+
+ """
spec = copy.deepcopy(spec)
super().__init__(spec, channels=spec['channels'])
self.schema = config_kwargs.pop('schema', 'workspace.json')
self.version = config_kwargs.pop('version', spec.get('version', None))
# run jsonschema validation of input specification against the (provided) schema
- log.info(f"Validating spec against schema: {self.schema}")
- utils.validate(self, self.schema, version=self.version)
+ if validate:
+ log.info(f"Validating spec against schema: {self.schema}")
+ utils.validate(self, self.schema, version=self.version)
self.measurement_names = []
for measurement in self.get('measurements', []):
@@ -784,7 +796,7 @@ def sorted(cls, workspace):
return cls(newspec)
@classmethod
- def build(cls, model, data, name='measurement'):
+ def build(cls, model, data, name='measurement', validate: bool = True):
"""
Build a workspace from model and data.
@@ -792,6 +804,7 @@ def build(cls, model, data, name='measurement'):
model (~pyhf.pdf.Model): A model to store into a workspace
data (:obj:`tensor`): A array holding observations to store into a workspace
name (:obj:`str`): The name of the workspace measurement
+ validate (:obj:`bool`): Whether to validate against a JSON schema
Returns:
~pyhf.workspace.Workspace: A new workspace object
@@ -823,4 +836,4 @@ def build(cls, model, data, name='measurement'):
{'name': k, 'data': list(data[model.config.channel_slices[k]])}
for k in model.config.channels
]
- return cls(workspace)
+ return cls(workspace, validate=validate)
| diff --git a/tests/test_workspace.py b/tests/test_workspace.py
--- a/tests/test_workspace.py
+++ b/tests/test_workspace.py
@@ -898,3 +898,14 @@ def test_wspace_unexpected_keyword_argument(simplemodels_model_data):
with pytest.raises(pyhf.exceptions.Unsupported):
pyhf.Workspace(spec, abc=True)
+
+
+def test_workspace_without_validation(mocker, simplemodels_model_data):
+ model, data = simplemodels_model_data
+
+ mocker.patch('pyhf.utils.validate')
+ ws = pyhf.Workspace.build(model, data, validate=False)
+ assert pyhf.utils.validate.called is False
+
+ pyhf.Workspace(dict(ws), validate=False)
+ assert pyhf.utils.validate.called is False
| Option to skip workspace validation
### Summary
Workspace validation is currently not skippable:
https://github.com/scikit-hep/pyhf/blob/767ed59d06274f810e47944f1fe4c95d94968543/src/pyhf/workspace.py#L289-L298
In some scenarios the validation takes a significant amount of time, and it would be convenient if it was possible to skip it.
### Additional Information
This is using [NormalMeasurement_combined.txt](https://github.com/scikit-hep/pyhf/files/7522251/NormalMeasurement_combined.txt) from https://github.com/scikit-hep/pyhf/discussions/1695. While working with [this gist](https://gist.github.com/alexander-held/2f6727df4cc8f65d75368e293ca83b5a) used in https://github.com/scikit-hep/pyhf/discussions/1695#discussioncomment-1635955, I noticed that workspace validation can take a long time for this example. Below is a minimal reproducer:
```python
import pyhf
import json
with open("NormalMeasurement_combined.txt") as f:
ws = pyhf.Workspace(json.load(f))
```
This runs in around 5.4 seconds for me locally.
When commenting out
https://github.com/scikit-hep/pyhf/blob/767ed59d06274f810e47944f1fe4c95d94968543/src/pyhf/workspace.py#L298
this time goes down to 1.6 seconds.
The workspace validation takes about as long as the model construction (via `ws.model()`), which for me takes about 4 seconds by itself (again dominated by validation time in this example). It is possible to skip model validation (via `validate=False`), and I believe it would be useful to allow the same for the workspace.
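With the kwarg added in the patch above, the reproducer could opt out of validation explicitly; roughly:

```python
import json

import pyhf

with open("NormalMeasurement_combined.txt") as f:
    spec = json.load(f)

ws = pyhf.Workspace(spec, validate=False)  # new: skip workspace schema validation
model = ws.model(validate=False)           # model-level skip already existed
```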
I also noticed that arbitrary kwargs can be passed to workspace construction, e.g. in the above example
```python
ws = pyhf.Workspace(json.load(f), abcdefg=False)
```
also runs fine. It may be convenient to catch these kwargs to help users identify typos.
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| This seems reasonable to add with it being turned off by default so that only expert users will try to use it. | 2021-11-22T12:44:07 |
scikit-hep/pyhf | 1,715 | scikit-hep__pyhf-1715 | [
"1714"
] | b9af390a79a1abe01dad5ec71a740198b49f22db | diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -30,13 +30,13 @@ def download(archive_url, output_directory, force=False, compress=False):
Example:
>>> from pyhf.contrib.utils import download
- >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
+ >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods") # doctest: +SKIP
>>> import os
- >>> sorted(os.listdir("1Lbb-likelihoods"))
+ >>> sorted(os.listdir("1Lbb-likelihoods")) # doctest: +SKIP
['BkgOnly.json', 'README.md', 'patchset.json']
- >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
+ >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True) # doctest: +SKIP
>>> import glob
- >>> glob.glob("1Lbb-likelihoods.tar.gz")
+ >>> glob.glob("1Lbb-likelihoods.tar.gz") # doctest: +SKIP
['1Lbb-likelihoods.tar.gz']
Args:
| Remove dependency on HEPData from docstring tests
### Summary
With PRs #1697, #1704, and #1711 there have been attempts to remove any dependency on HEPData infrastructure for testing. There is still a dependency in the [`pyhf.contrib.utils.download` docs](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.contrib.utils.download.html) as it demos downloading from HEPData in the docstring example. This can fail with
```pytb
____________________ [doctest] pyhf.contrib.utils.download _____________________
028 directory at the path given.
029
030 Example:
031
032 >>> from pyhf.contrib.utils import download
033 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
034 >>> import os
035 >>> sorted(os.listdir("1Lbb-likelihoods"))
036 ['BkgOnly.json', 'README.md', 'patchset.json']
037 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
UNEXPECTED EXCEPTION: InvalidArchive('https://doi.org/10.17182/hepdata.90607.v3/r3 gives a response code of 500.\nThere is either something temporarily wrong with the archive host or https://doi.org/10.17182/hepdata.90607.v3/r3 is an invalid URL.')
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.9.9/x64/lib/python3.9/doctest.py", line 1334, in __run
exec(compile(example.source, filename, "single",
File "<doctest pyhf.contrib.utils.download[4]>", line 1, in <module>
File "/home/runner/work/pyhf/pyhf/src/pyhf/contrib/utils.py", line 73, in download
raise exceptions.InvalidArchive(
pyhf.exceptions.InvalidArchive: https://doi.org/10.17182/hepdata.90607.v3/r3 gives a response code of 500.
There is either something temporarily wrong with the archive host or https://doi.org/10.17182/hepdata.90607.v3/r3 is an invalid URL.
/home/runner/work/pyhf/pyhf/src/pyhf/contrib/utils.py:37: UnexpectedException
```
### OS / Environment
```console
All platforms
```
### Steps to Reproduce
Run the CI and get unlucky
### File Upload (optional)
_No response_
### Expected Results
For the CI to run the doctests without error and to not depend on HEPData uptime.
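The patch above achieves this with the standard `doctest` `+SKIP` directive on the network-dependent lines, so the example stays in the rendered docs but is not executed, e.g.:

```python
>>> from pyhf.contrib.utils import download
>>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")  # doctest: +SKIP
>>> import os
>>> sorted(os.listdir("1Lbb-likelihoods"))  # doctest: +SKIP
['BkgOnly.json', 'README.md', 'patchset.json']
```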
### Actual Results
```pytb
____________________ [doctest] pyhf.contrib.utils.download _____________________
028 directory at the path given.
029
030 Example:
031
032 >>> from pyhf.contrib.utils import download
033 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
034 >>> import os
035 >>> sorted(os.listdir("1Lbb-likelihoods"))
036 ['BkgOnly.json', 'README.md', 'patchset.json']
037 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
UNEXPECTED EXCEPTION: InvalidArchive('https://doi.org/10.17182/hepdata.90607.v3/r3 gives a response code of 500.\nThere is either something temporarily wrong with the archive host or https://doi.org/10.17182/hepdata.90607.v3/r3 is an invalid URL.')
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.9.9/x64/lib/python3.9/doctest.py", line 1334, in __run
exec(compile(example.source, filename, "single",
File "<doctest pyhf.contrib.utils.download[4]>", line 1, in <module>
File "/home/runner/work/pyhf/pyhf/src/pyhf/contrib/utils.py", line 73, in download
raise exceptions.InvalidArchive(
pyhf.exceptions.InvalidArchive: https://doi.org/10.17182/hepdata.90607.v3/r3 gives a response code of 500.
There is either something temporarily wrong with the archive host or https://doi.org/10.17182/hepdata.90607.v3/r3 is an invalid URL.
/home/runner/work/pyhf/pyhf/src/pyhf/contrib/utils.py:37: UnexpectedException
```
### pyhf Version
```console
pyhf, version 0.6.3
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2021-11-30T00:07:39 |
||
scikit-hep/pyhf | 1,730 | scikit-hep__pyhf-1730 | [
"1729"
] | eb45d60676fb2c3f3925d404be54952895f41870 | diff --git a/src/pyhf/tensor/jax_backend.py b/src/pyhf/tensor/jax_backend.py
--- a/src/pyhf/tensor/jax_backend.py
+++ b/src/pyhf/tensor/jax_backend.py
@@ -287,7 +287,7 @@ def percentile(self, tensor_in, q, axis=None, interpolation="linear"):
>>> import jax.numpy as jnp
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([[10, 7, 4], [3, 2, 1]])
- >>> pyhf.tensorlib.percentile(a, jnp.float64(50))
+ >>> pyhf.tensorlib.percentile(a, 50)
DeviceArray(3.5, dtype=float64)
>>> pyhf.tensorlib.percentile(a, 50, axis=1)
DeviceArray([7., 2.], dtype=float64)
@@ -314,8 +314,6 @@ def percentile(self, tensor_in, q, axis=None, interpolation="linear"):
JAX ndarray: The value of the :math:`q`-th percentile of the tensor along the specified axis.
"""
- # TODO: Monitor future JAX releases for changes to percentile dtype promotion
- # c.f. https://github.com/google/jax/issues/8513
return jnp.percentile(tensor_in, q, axis=axis, interpolation=interpolation)
def stack(self, sequence, axis=0):
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -99,7 +99,13 @@ def backend(request):
)
if fail_backend:
- pytest.xfail(f"expect {func_name} to fail as specified")
+ # Mark the test as xfail to actually run it and ensure that it does
+ # fail. pytest.mark.xfail checks for failure, while pytest.xfail
+ # assumes failure and skips running the test.
+ # c.f. https://docs.pytest.org/en/6.2.x/skipping.html#xfail
+ request.node.add_marker(
+ pytest.mark.xfail(reason=f"expect {func_name} to fail as specified")
+ )
# actual execution here, after all checks is done
pyhf.set_backend(*request.param)
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -376,7 +376,6 @@ def test_boolean_mask(backend):
)
[email protected]_jax
def test_percentile(backend):
tb = pyhf.tensorlib
a = tb.astensor([[10, 7, 4], [3, 2, 1]])
@@ -392,7 +391,6 @@ def test_percentile(backend):
# c.f. https://github.com/scikit-hep/pyhf/issues/1693
@pytest.mark.fail_pytorch
@pytest.mark.fail_pytorch64
[email protected]_jax
def test_percentile_interpolation(backend):
tb = pyhf.tensorlib
a = tb.astensor([[10, 7, 4], [3, 2, 1]])
@@ -404,33 +402,6 @@ def test_percentile_interpolation(backend):
assert tb.tolist(tb.percentile(a, 50, interpolation="higher")) == 4.0
[email protected]_jax
-def test_percentile_jax(backend):
- tb = pyhf.tensorlib
- a = tb.astensor([[10, 7, 4], [3, 2, 1]])
- assert tb.tolist(tb.percentile(a, 0)) == 1
-
- # TODO: Monitor future JAX releases for changes to percentile dtype promotion
- # c.f. https://github.com/scikit-hep/pyhf/issues/1693
- assert tb.tolist(tb.percentile(a, np.float64(50))) == 3.5
- assert tb.tolist(tb.percentile(a, np.float64(100))) == 10
- assert tb.tolist(tb.percentile(a, 50, axis=1)) == [7.0, 2.0]
-
-
[email protected]_jax
-def test_percentile_interpolation_jax(backend):
- tb = pyhf.tensorlib
- a = tb.astensor([[10, 7, 4], [3, 2, 1]])
-
- # TODO: Monitor future JAX releases for changes to percentile dtype promotion
- # c.f. https://github.com/scikit-hep/pyhf/issues/1693
- assert tb.tolist(tb.percentile(a, np.float64(50), interpolation="linear")) == 3.5
- assert tb.tolist(tb.percentile(a, 50, interpolation="nearest")) == 3.0
- assert tb.tolist(tb.percentile(a, 50, interpolation="lower")) == 3.0
- assert tb.tolist(tb.percentile(a, 50, interpolation="midpoint")) == 3.5
- assert tb.tolist(tb.percentile(a, 50, interpolation="higher")) == 4.0
-
-
def test_tensor_tile(backend):
a = [[1], [2], [3]]
tb = pyhf.tensorlib
@@ -465,7 +436,6 @@ def test_1D_gather(backend):
) == [[5, 1], [4, 3]]
[email protected]_pytorch
def test_ND_gather(backend):
tb = pyhf.tensorlib
assert tb.tolist(
| Use of xfail in conftest.py hiding passing conditions
### Summary
In PR #1702 we adopted using `xfail_strict = true` for `pytest` to become aware of when tests that _should_ be failing aren't. With the release of `jax` `v0.2.26` (which includes a fix for https://github.com/google/jax/issues/8513 — c.f. PR #817)
https://github.com/scikit-hep/pyhf/blob/43c156702fd2a0392114c5b2468f0128ed16ea72/tests/test_tensor.py#L379-L396
_should_ be failing given `xfail_strict` as `jax` `v0.2.26` should be passing here. However, they don't fail.
This is because in `conftest.py` we use [`pytest.xfail`](https://docs.pytest.org/en/6.2.x/reference.html#pytest-xfail)
https://github.com/scikit-hep/pyhf/blob/43c156702fd2a0392114c5b2468f0128ed16ea72/tests/conftest.py#L101-L102
and [according to the xfail tutorial](https://docs.pytest.org/en/6.2.x/skipping.html#xfail)
> Note that no other code is executed after the `pytest.xfail()` call, differently from the marker
so the use of `pytest.xfail` in `conftest.py` is actually _skipping_ tests instead of running them and checking for failure (which is a surprise to me!). The [`pytest.xfail` docs](https://docs.pytest.org/en/6.2.x/reference.html#pytest-xfail) note that
> It is better to use the [`pytest.mark.xfail`](https://docs.pytest.org/en/6.2.x/reference.html#pytest-mark-xfail-ref) marker when possible to declare a test to be xfailed under certain conditions like known bugs or missing features.
Note also that
https://github.com/scikit-hep/pyhf/blob/43c156702fd2a0392114c5b2468f0128ed16ea72/pyproject.toml#L41-L42
should actually be under `[tool.pytest.ini_options]` as [noted in the `pytest` docs](https://docs.pytest.org/en/6.2.x/customize.html#pyproject-toml) (this was done wrong in PR #1702).
### OS / Environment
```console
All
```
### Steps to Reproduce
```diff
$ git diff
diff --git a/pyproject.toml b/pyproject.toml
index 9c81dc60..6616d586 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,11 +38,9 @@ ignore = [
'AUTHORS',
]
-[tool.pytest]
-xfail_strict = true
-
[tool.pytest.ini_options]
minversion = "6.0"
+xfail_strict = true
addopts = [
"--ignore=setup.py",
"--ignore=validation/",
diff --git a/tests/conftest.py b/tests/conftest.py
index 0e2537b7..e1c3f87e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -99,7 +99,9 @@ def backend(request):
)
if fail_backend:
+ print(f"expect {func_name} to fail as specified")
pytest.xfail(f"expect {func_name} to fail as specified")
+ print("this won't ever run")
# actual execution here, after all checks is done
pyhf.set_backend(*request.param)
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 616b6ca5..3b8768e2 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -377,6 +377,7 @@ def test_boolean_mask(backend):
@pytest.mark.fail_jax
[email protected]_numpy
def test_percentile(backend):
tb = pyhf.tensorlib
a = tb.astensor([[10, 7, 4], [3, 2, 1]])
```
`test_percentile[numpy]` should pass, but use of `pytest.xfail` skips it:
```console
$ pytest -sx tests/test_tensor.py -k test_percentile[numpy]
=========================================================================================== test session starts ===========================================================================================
platform linux -- Python 3.9.6, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
Matplotlib: 3.5.0
Freetype: 2.6.1
benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/feickert/Code/GitHub/pyhf, configfile: pyproject.toml
plugins: mock-3.6.1, console-scripts-1.2.1, mpl-0.13, requests-mock-1.9.3, benchmark-3.4.1, cov-3.0.0, anyio-3.3.3
collected 222 items / 221 deselected / 1 selected
tests/test_tensor.py expect test_percentile[numpy] to fail as specified
x
```
### Expected Results
For tests marked with `xfail` to fail:
```diff
$ git diff
diff --git a/pyproject.toml b/pyproject.toml
index 9c81dc60..6616d586 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,11 +38,9 @@ ignore = [
'AUTHORS',
]
-[tool.pytest]
-xfail_strict = true
-
[tool.pytest.ini_options]
minversion = "6.0"
+xfail_strict = true
addopts = [
"--ignore=setup.py",
"--ignore=validation/",
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 616b6ca5..421015f3 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -377,6 +377,7 @@ def test_boolean_mask(backend):
@pytest.mark.fail_jax
[email protected]()
def test_percentile(backend):
tb = pyhf.tensorlib
a = tb.astensor([[10, 7, 4], [3, 2, 1]])
```
```console
$ pytest -sx tests/test_tensor.py -k test_percentile[numpy]
=========================================================================================== test session starts ===========================================================================================
platform linux -- Python 3.9.6, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
Matplotlib: 3.5.0
Freetype: 2.6.1
benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/feickert/Code/GitHub/pyhf, configfile: pyproject.toml
plugins: mock-3.6.1, console-scripts-1.2.1, mpl-0.13, requests-mock-1.9.3, benchmark-3.4.1, cov-3.0.0, anyio-3.3.3
collected 222 items / 221 deselected / 1 selected
tests/test_tensor.py F
================================================================================================ FAILURES =================================================================================================
_________________________________________________________________________________________ test_percentile[numpy] __________________________________________________________________________________________
[XPASS(strict)]
```
### Actual Results
```console
$ pytest -sx tests/test_tensor.py -k test_percentile[numpy]
=========================================================================================== test session starts ===========================================================================================
platform linux -- Python 3.9.6, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
Matplotlib: 3.5.0
Freetype: 2.6.1
benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
rootdir: /home/feickert/Code/GitHub/pyhf, configfile: pyproject.toml
plugins: mock-3.6.1, console-scripts-1.2.1, mpl-0.13, requests-mock-1.9.3, benchmark-3.4.1, cov-3.0.0, anyio-3.3.3
collected 222 items / 221 deselected / 1 selected
tests/test_tensor.py expect test_percentile[numpy] to fail as specified
x
```
### pyhf Version
`master` at commit 43c156702fd2a0392114c5b2468f0128ed16ea72
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2021-12-09T03:59:01 |
|
scikit-hep/pyhf | 1,767 | scikit-hep__pyhf-1767 | [
"1766"
] | e3d879f3e4982ac629bec7bf92d78b00025e52dc | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -73,7 +73,7 @@ def setup(app):
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable/', None),
- 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
+ 'scipy': ('https://docs.scipy.org/doc/scipy/', None),
'matplotlib': ('https://matplotlib.org/stable/', None),
'iminuit': ('https://iminuit.readthedocs.io/en/stable/', None),
'uproot': ('https://uproot.readthedocs.io/en/latest/', None),
| diff --git a/tests/test_optim.py b/tests/test_optim.py
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -9,7 +9,7 @@
import numpy as np
-# from https://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#nelder-mead-simplex-algorithm-method-nelder-mead
+# from https://docs.scipy.org/doc/scipy/tutorial/optimize.html#nelder-mead-simplex-algorithm-method-nelder-mead
@pytest.mark.skip_pytorch
@pytest.mark.skip_pytorch64
@pytest.mark.skip_tensorflow
| scipy reference objects can't be found for docs
### Summary
The docs are failing to build as we're hitting the warning
```pytb
WARNING: failed to reach any of the inventories with the following issues:
intersphinx inventory 'https://docs.scipy.org/doc/scipy/reference/objects.inv' not readable due to ValueError: unknown or unsupported inventory version: ValueError('invalid inventory header: ')
```
It is unclear if these just got moved when [`scipy` `v1.8.0`](https://pypi.org/project/scipy/1.8.0/#history) got released or if there is some outage.
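For reference, the one-line fix in the patch above just drops the now-stale `reference/` segment from the `intersphinx_mapping` entry in `docs/conf.py`:

```python
intersphinx_mapping = {
    # before (inventory no longer readable at this path):
    # "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
}
```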
### OS / Environment
All
### Steps to Reproduce
Build the docs in CI
### File Upload (optional)
_No response_
### Expected Results
Docs build without warnings or errors
### Actual Results
```pytb
WARNING: failed to reach any of the inventories with the following issues:
intersphinx inventory 'https://docs.scipy.org/doc/scipy/reference/objects.inv' not readable due to ValueError: unknown or unsupported inventory version: ValueError('invalid inventory header: ')
```
### pyhf Version
`HEAD` is at e3d879f3e4982ac629bec7bf92d78b00025e52dc
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| This is covered in https://github.com/scipy/scipy/issues/15545. | 2022-02-07T21:17:11 |
scikit-hep/pyhf | 1,790 | scikit-hep__pyhf-1790 | [
"1223"
] | 9fd99be886349a90e927672e950cc233fad0916c | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@
+ extras_require['test']
+ [
'nbdime',
- 'bump2version',
+ 'tbump>=6.7.0',
'ipython',
'pre-commit',
'check-manifest',
| Guard SCHEMA_VERSION from version bumps
I don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version`, so we might need to look for a replacement for `bump2version` that provides guard support.
This is going to be a problem when
https://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2
hits `1.0.0` and conflicts with
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13
and also has to properly pick up the multiple correct instances in
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_
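A toy illustration of the collision being guarded against (hypothetical file contents, not the actual pyhf sources): an unguarded search-and-replace bump rewrites every literal match of the old version, including the schema version, whereas `tbump` (swapped in by the patch above) supports restricting replacements per file, e.g. via a search pattern.

```python
# hypothetical contents mixing the package version and the schema version
content = 'SCHEMA_VERSION = "1.0.0"\n__version__ = "1.0.0"\n'

# an unguarded bump from 1.0.0 -> 1.1.0 clobbers SCHEMA_VERSION as well
bumped = content.replace("1.0.0", "1.1.0")
assert 'SCHEMA_VERSION = "1.1.0"' in bumped  # the unwanted rewrite
```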
| 2022-02-25T04:51:31 |
||
scikit-hep/pyhf | 1,818 | scikit-hep__pyhf-1818 | [
"1815"
] | 6d03b9e90cd1862bfd9069b340a6400d13ba2119 | diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py
--- a/src/pyhf/schema/__init__.py
+++ b/src/pyhf/schema/__init__.py
@@ -23,17 +23,40 @@ class Schema(sys.modules[__name__].__class__):
"""
A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.
- Example:
+ .. rubric:: Example (callable)
+
+ .. code-block:: pycon
+
>>> import pyhf.schema
>>> import pathlib
>>> curr_path = pyhf.schema.path
- >>> curr_path # doctest: +ELLIPSIS
+ >>> curr_path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
- >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> pyhf.schema(new_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
>>> pyhf.schema.path
PosixPath('/home/root/my/new/path')
- >>> pyhf.schema(curr_path)
- >>> pyhf.schema.path # doctest: +ELLIPSIS
+ >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+
+ .. rubric:: Example (context-manager)
+
+ .. code-block:: pycon
+
+ >>> import pyhf.schema
+ >>> import pathlib
+ >>> curr_path = pyhf.schema.path
+ >>> curr_path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> with pyhf.schema(new_path):
+ ... print(repr(pyhf.schema.path))
+ ...
+ PosixPath('/home/root/my/new/path')
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
"""
@@ -45,10 +68,23 @@ def __call__(self, new_path: pathlib.Path):
Args:
new_path (pathlib.Path): Path to folder containing the schemas
+ Returns:
+ self (pyhf.schema.Schema): Returns itself (for contextlib management)
+ """
+ self.orig_path, variables.schemas = variables.schemas, new_path
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ """
+ Reset the local search path for finding schemas locally.
+
Returns:
None
"""
- variables.schemas = new_path
+ variables.schemas = self.orig_path
@property
def path(self):
| diff --git a/tests/test_public_api.py b/tests/test_public_api.py
--- a/tests/test_public_api.py
+++ b/tests/test_public_api.py
@@ -1,6 +1,7 @@
import pytest
import pyhf
import numpy as np
+import pathlib
@pytest.fixture(scope='function')
@@ -200,3 +201,23 @@ def test_pdf_batched(backend):
model.pdf(pars, data)
model.expected_data(pars)
+
+
+def test_set_schema_path(monkeypatch):
+ monkeypatch.setattr(
+ pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
+ )
+
+ new_path = pathlib.Path('a/new/path')
+ pyhf.schema(new_path)
+ assert pyhf.schema.path == new_path
+
+
+def test_set_schema_path_context(monkeypatch):
+ monkeypatch.setattr(
+ pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
+ )
+
+ new_path = pathlib.Path('a/new/path')
+ with pyhf.schema(new_path):
+ assert pyhf.schema.path == new_path
diff --git a/tests/test_schema.py b/tests/test_schema.py
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -27,18 +27,51 @@ def test_schema_callable():
assert callable(pyhf.schema)
-def test_schema_changeable(datadir):
+def test_schema_changeable(datadir, monkeypatch):
+ monkeypatch.setattr(
+ pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
+ )
+ old_path = pyhf.schema.path
+ new_path = datadir / 'customschema'
+
with pytest.raises(pyhf.exceptions.SchemaNotFound):
pyhf.Workspace(json.load(open(datadir / 'customschema' / 'custom.json')))
- old_path = pyhf.schema.path
- pyhf.schema(datadir / 'customschema')
- assert pyhf.schema.path != old_path
- assert pyhf.schema.path == datadir / 'customschema'
- assert pyhf.Workspace(json.load(open(datadir / 'customschema' / 'custom.json')))
+ pyhf.schema(new_path)
+ assert old_path != pyhf.schema.path
+ assert new_path == pyhf.schema.path
+ assert pyhf.Workspace(json.load(open(new_path / 'custom.json')))
pyhf.schema(old_path)
+def test_schema_changeable_context(datadir, monkeypatch):
+ monkeypatch.setattr(
+ pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
+ )
+ old_path = pyhf.schema.path
+ new_path = datadir / 'customschema'
+
+ assert old_path == pyhf.schema.path
+ with pyhf.schema(new_path):
+ assert old_path != pyhf.schema.path
+ assert new_path == pyhf.schema.path
+ assert pyhf.Workspace(json.load(open(new_path / 'custom.json')))
+ assert old_path == pyhf.schema.path
+
+
+def test_schema_changeable_context_error(datadir, monkeypatch):
+ monkeypatch.setattr(
+ pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
+ )
+ old_path = pyhf.schema.path
+ new_path = datadir / 'customschema'
+
+ with pytest.raises(ZeroDivisionError):
+ with pyhf.schema(new_path):
+ raise ZeroDivisionError()
+ assert old_path == pyhf.schema.path
+
+
def test_no_channels():
spec = {'channels': []}
with pytest.raises(pyhf.exceptions.InvalidSpecification):
| Add contextlib support to the pyhf.schema API
> instead of having users overwriting the value by assignment, do you think there is value in offering a `pyhf.utils.update_schema_path` or something that performs this operation through the API?
>
> Thanks @kratsg for taking up my suggestion!
>
> I second @matthewfeickert's suggestion to change schemas via function call rather than assignment. It is much simpler to add functionality to a function if it ever becomes necessary, than to replace an entire existing module with a class instance with a property.
>
> I'd even go so far to say that a context manager doubling as an update function would be ideal IMO:
> ```python
> # in pyhf.utils
> _SCHEMAS = Path(...)
> class use_schema_path: # snake_case to remind of function-like usage
> def __init__(self, path):
> global _SCHEMAS
> self._old_schemas = _SCHEMAS
> _SCHEMAS = pathlib.Path(path)
> def __enter__(self):
> pass
> def __exit__(self, *args, **kwargs):
> global _SCHEMAS
> _SCHEMAS = self._old_schemas
> ```
> which can still be called as a function (only executing `__init__`), so short scripts etc. are not forced to use `with` blocks.
> But it can also be used like so:
> ```python
> def make_my_workspace(spec):
> with pyhf.utils.use_schema_path('/my/very/special/schemas'):
> return pyhf.Workspace(spec)
> ```
> So as a user writing code on top of pyhf, I don't have to worry about resesetting the global variable, the CM does it for me, and there are fewer mistakes to make.
>
_Originally posted by @lhenkelm in https://github.com/scikit-hep/pyhf/issues/1753#issuecomment-1026678066_
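For comparison, the API implemented in the patch above keeps `pyhf.schema` usable as a plain callable and additionally gives it context-manager semantics, restoring the previous path on exit (including when an exception is raised), so downstream code along the lines of the suggestion becomes roughly:

```python
import pathlib

import pyhf
import pyhf.schema


def make_my_workspace(spec, schema_dir):
    # the previous schema path is restored when the with-block exits
    with pyhf.schema(pathlib.Path(schema_dir)):
        return pyhf.Workspace(spec)
```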
| This is on top of #1609 . | 2022-03-23T15:58:13 |
scikit-hep/pyhf | 1,819 | scikit-hep__pyhf-1819 | [
"1814"
] | 36e0d67530a94462fc1423585d78947749f05c65 | diff --git a/src/pyhf/writexml.py b/src/pyhf/writexml.py
--- a/src/pyhf/writexml.py
+++ b/src/pyhf/writexml.py
@@ -178,8 +178,8 @@ def build_modifier(spec, modifierspec, channelname, samplename, sampledata):
high = 10
for p in spec['measurements'][0]['config']['parameters']:
if p['name'] == modifierspec['name']:
- val = p['inits'][0]
- low, high = p['bounds'][0]
+ val = p.get('inits', [val])[0]
+ low, high = p.get('bounds', [[low, high]])[0]
attrs['Val'] = str(val)
attrs['Low'] = str(low)
attrs['High'] = str(high)
| diff --git a/tests/test_export.py b/tests/test_export.py
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -428,3 +428,30 @@ def test_integer_data(datadir, mocker):
channel = pyhf.writexml.build_channel(spec, channel_spec, {})
assert channel
+
+
[email protected](
+ "fname,val,low,high",
+ [
+ ('workspace_no_parameter_inits.json', '1', '-5', '5'),
+ ('workspace_no_parameter_bounds.json', '5', '0', '10'),
+ ],
+ ids=['no_inits', 'no_bounds'],
+)
+def test_issue1814(datadir, mocker, fname, val, low, high):
+ with open(datadir / fname) as spec_file:
+ spec = json.load(spec_file)
+
+ modifierspec = {'data': None, 'name': 'mu_sig', 'type': 'normfactor'}
+ channelname = None
+ samplename = None
+ sampledata = None
+
+ modifier = pyhf.writexml.build_modifier(
+ spec, modifierspec, channelname, samplename, sampledata
+ )
+ assert modifier is not None
+ assert sorted(modifier.keys()) == ['High', 'Low', 'Name', 'Val']
+ assert modifier.get('Val') == val
+ assert modifier.get('Low') == low
+ assert modifier.get('High') == high
diff --git a/tests/test_export/workspace_no_parameter_bounds.json b/tests/test_export/workspace_no_parameter_bounds.json
new file mode 100644
--- /dev/null
+++ b/tests/test_export/workspace_no_parameter_bounds.json
@@ -0,0 +1,37 @@
+{
+ "channels": [
+ {
+ "name": "ch",
+ "samples": [
+ {
+ "data": [1000.0],
+ "modifiers": [
+ {"data": null, "name": "mu_sig", "type": "normfactor"},
+ {
+ "data": {"hi": 1.5, "lo": 0.5},
+ "name": "unc",
+ "type": "normsys"
+ }
+ ],
+ "name": "signal"
+ }
+ ]
+ }
+ ],
+ "measurements": [
+ {
+ "config": {
+ "parameters": [
+ {
+ "name": "mu_sig",
+ "inits": [5]
+ }
+ ],
+ "poi": "mu_sig"
+ },
+ "name": "meas"
+ }
+ ],
+ "observations": [{"data": [1000], "name": "ch"}],
+ "version": "1.0.0"
+}
diff --git a/tests/test_export/workspace_no_parameter_inits.json b/tests/test_export/workspace_no_parameter_inits.json
new file mode 100644
--- /dev/null
+++ b/tests/test_export/workspace_no_parameter_inits.json
@@ -0,0 +1,37 @@
+{
+ "channels": [
+ {
+ "name": "ch",
+ "samples": [
+ {
+ "data": [1000.0],
+ "modifiers": [
+ {"data": null, "name": "mu_sig", "type": "normfactor"},
+ {
+ "data": {"hi": 1.5, "lo": 0.5},
+ "name": "unc",
+ "type": "normsys"
+ }
+ ],
+ "name": "signal"
+ }
+ ]
+ }
+ ],
+ "measurements": [
+ {
+ "config": {
+ "parameters": [
+ {
+ "name": "mu_sig",
+ "bounds": [[-5, 5]]
+ }
+ ],
+ "poi": "mu_sig"
+ },
+ "name": "meas"
+ }
+ ],
+ "observations": [{"data": [1000], "name": "ch"}],
+ "version": "1.0.0"
+}
| `pyhf json2xml` requires parameter inits
### Summary
When converting a workspace via `pyhf json2xml`, the model config seems to rely on the presence of parameter inits, even though the spec does not strictly require them.
### OS / Environment
```console
n/a
```
### Steps to Reproduce
workspace:
```json
{
"channels": [
{
"name": "ch",
"samples": [
{
"data": [1000.0],
"modifiers": [
{"data": null, "name": "mu_sig", "type": "normfactor"},
{
"data": {"hi": 1.5, "lo": 0.5},
"name": "unc",
"type": "normsys"
}
],
"name": "signal"
}
]
}
],
"measurements": [
{
"config": {
"parameters": [
{
"name": "mu_sig",
"bounds": [[0, 10]]
}
],
"poi": "mu_sig"
},
"name": "meas"
}
],
"observations": [{"data": [1000], "name": "ch"}],
"version": "1.0.0"
}
```
convert with `pyhf json2xml`:
```pytb
...
File "[...]/pyhf/src/pyhf/cli/rootio.py", line 88, in json2xml
writexml.writexml(
File "[...]/pyhf/src/pyhf/writexml.py", line 292, in writexml
channel = build_channel(spec, channelspec, spec.get('observations'))
File "[...]/pyhf/src/pyhf/writexml.py", line 271, in build_channel
channel.append(build_sample(spec, samplespec, channelspec['name']))
File "[...]/pyhf/src/pyhf/writexml.py", line 245, in build_sample
modifier = build_modifier(
File "[...]/pyhf/src/pyhf/writexml.py", line 181, in build_modifier
val = p['inits'][0]
KeyError: 'inits'
```
A fit with `pyhf fit` works perfectly fine. The addition of `"inits": [1.0]` in the model config also fixes the crash, as does removing this parameter configuration completely.
### File Upload (optional)
_No response_
### Expected Results
I expect the conversion to not rely on the presence of `inits`. The default of `1.0` may not make sense if some custom bounds are specified, but I think this is on the user to fix. The same issue also appears when just setting a `normfactor` to constant without specifying the `inits`.
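For illustration, a minimal sketch of the fallback I have in mind (not the actual `writexml` code; the spec literal and `mu_sig` below just mirror the example above, and `1`, `0`, `10` are the defaults the exporter appears to start from):

```python
# sketch only: fall back to defaults when a parameter config omits
# "inits" or "bounds" instead of raising a KeyError
spec = {
    "measurements": [
        {"config": {"parameters": [{"name": "mu_sig", "bounds": [[0, 10]]}]}}
    ]
}
modifier_name = "mu_sig"

val, low, high = 1, 0, 10  # pre-existing defaults
for p in spec["measurements"][0]["config"]["parameters"]:
    if p["name"] == modifier_name:
        val = p.get("inits", [val])[0]
        low, high = p.get("bounds", [[low, high]])[0]

print(val, low, high)  # 1 0 10 -> no crash even though "inits" is missing
```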
### Actual Results
```console
File "[...]/pyhf/src/pyhf/cli/rootio.py", line 88, in json2xml
writexml.writexml(
File "[...]/pyhf/src/pyhf/writexml.py", line 292, in writexml
channel = build_channel(spec, channelspec, spec.get('observations'))
File "[...]/pyhf/src/pyhf/writexml.py", line 271, in build_channel
channel.append(build_sample(spec, samplespec, channelspec['name']))
File "[...]/pyhf/src/pyhf/writexml.py", line 245, in build_sample
modifier = build_modifier(
File "[...]/pyhf/src/pyhf/writexml.py", line 181, in build_modifier
val = p['inits'][0]
KeyError: 'inits'
```
### pyhf Version
```console
0.6.3
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-03-23T16:48:23 |
|
scikit-hep/pyhf | 1,820 | scikit-hep__pyhf-1820 | [
"1736"
] | 3ab2077f63e8307258d7e32492fe3b7c527dff5f | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,6 +52,7 @@ def setup(app):
'nbsphinx',
'sphinx_issues',
'sphinx_copybutton',
+ 'sphinx_togglebutton',
'xref',
]
bibtex_bibfiles = [
diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py
new file mode 100644
--- /dev/null
+++ b/docs/generate_jupyterlite_iframe.py
@@ -0,0 +1,30 @@
+import urllib.parse
+
+
+def main():
+ code = """\
+import piplite
+await piplite.install(["pyhf==0.6.3", "requests"])
+%matplotlib inline
+import pyhf\
+"""
+
+ parsed_url = urllib.parse.quote(code)
+ url_base = "https://jupyterlite.github.io/demo/repl/index.html"
+ jupyterlite_options = "?kernel=python&toolbar=1&code="
+ jupyterlite_url = url_base + jupyterlite_options + parsed_url
+
+ print(f"# jupyterlite URL:\n{jupyterlite_url}")
+
+ jupyterlite_iframe_rst = f"""\
+ <iframe
+ src="{jupyterlite_url}"
+ width="100%"
+ height="500px"
+ ></iframe>\
+"""
+ print(f"\n# RST for iframe for jupyterlite.rst:\n{jupyterlite_iframe_rst}")
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,6 +59,7 @@
'ipywidgets',
'sphinx-issues',
'sphinx-copybutton>=0.3.2',
+ 'sphinx-togglebutton>=0.3.0',
]
)
)
| Explore pyodide for pyhf
### Summary
Pyodide allows pure-Python wheels to be "imported" into the browser and run as-is. This means we could ship an interactive console as part of our docs that just works.
https://pyodide.org/en/stable/index.html
However, one of our main dependencies, scipy, is still being worked on for pyodide support (pyodide/pyodide#1293), but it seems like all of our other deps should be ok.
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-03-23T20:23:27 |
||
scikit-hep/pyhf | 1,821 | scikit-hep__pyhf-1821 | [
"1770"
] | 569f51257c6a895508f9c026bd61a2e723cb339c | diff --git a/src/pyhf/writexml.py b/src/pyhf/writexml.py
--- a/src/pyhf/writexml.py
+++ b/src/pyhf/writexml.py
@@ -188,6 +188,7 @@ def build_modifier(spec, modifierspec, channelname, samplename, sampledata):
attrs['HistoName'] = _make_hist_name(
channelname, samplename, modifierspec['name']
)
+ # must be deleted, HiFa XML specification does not support 'Name'
del attrs['Name']
# need to make this a relative uncertainty stored in ROOT file
_export_root_histogram(
| diff --git a/tests/test_export.py b/tests/test_export.py
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -224,17 +224,17 @@ def test_export_measurement():
@pytest.mark.parametrize(
- "spec, has_root_data, attrs",
+ "spec, has_root_data, attrs, modtype",
[
- (spec_staterror(), True, ['Activate', 'HistoName']),
- (spec_histosys(), True, ['HistoNameHigh', 'HistoNameLow']),
- (spec_normsys(), False, ['High', 'Low']),
- (spec_shapesys(), True, ['ConstraintType', 'HistoName']),
- (spec_shapefactor(), False, []),
+ (spec_staterror(), True, ['Activate', 'HistoName'], 'staterror'),
+ (spec_histosys(), True, ['HistoNameHigh', 'HistoNameLow'], 'histosys'),
+ (spec_normsys(), False, ['High', 'Low'], 'normsys'),
+ (spec_shapesys(), True, ['ConstraintType', 'HistoName'], 'shapesys'),
+ (spec_shapefactor(), False, [], 'shapefactor'),
],
ids=['staterror', 'histosys', 'normsys', 'shapesys', 'shapefactor'],
)
-def test_export_modifier(mocker, caplog, spec, has_root_data, attrs):
+def test_export_modifier(mocker, caplog, spec, has_root_data, attrs, modtype):
channelspec = spec['channels'][0]
channelname = channelspec['name']
samplespec = channelspec['samples'][1]
@@ -242,6 +242,8 @@ def test_export_modifier(mocker, caplog, spec, has_root_data, attrs):
sampledata = samplespec['data']
modifierspec = samplespec['modifiers'][0]
+ assert modifierspec['type'] == modtype
+
mocker.patch('pyhf.writexml._ROOT_DATA_FILE')
with caplog.at_level(logging.DEBUG, 'pyhf.writexml'):
@@ -255,7 +257,9 @@ def test_export_modifier(mocker, caplog, spec, has_root_data, attrs):
assert "Skipping modifier" not in caplog.text
# if the modifier is a staterror, it has no Name
- if 'Name' in modifier.attrib:
+ if modtype == 'staterror':
+ assert 'Name' not in modifier.attrib
+ else:
assert modifier.attrib['Name'] == modifierspec['name']
assert all(attr in modifier.attrib for attr in attrs)
assert pyhf.writexml._ROOT_DATA_FILE.__setitem__.called == has_root_data
| Use of `del` for staterror in writexml necessary?
In PR #435 the use of `del` was added in
https://github.com/scikit-hep/pyhf/blob/e3d879f3e4982ac629bec7bf92d78b00025e52dc/src/pyhf/writexml.py#L186-L191
I assume this was because later on in the PR it mentions
https://github.com/scikit-hep/pyhf/blob/e3d879f3e4982ac629bec7bf92d78b00025e52dc/tests/test_export.py#L257-L259
However, if you remove that `del` statement
```
pytest -sx tests/test_export.py -k test_export_modifier
```
still passes.
Do we still need it? @kratsg have any thoughts here?
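(If it is still needed, it might be worth asserting that explicitly in the export test — a rough sketch of the check, using a bare `xml.etree` element as a stand-in for the real `build_modifier` output, with a hypothetical `HistoName` value:)

```python
import xml.etree.ElementTree as ET

# stand-in for the StatError element that build_modifier exports
modifier = ET.Element("StatError", Activate="True", HistoName="h_mychannel_signal_staterror")
assert "Name" not in modifier.attrib  # exported staterror must not carry a Name attribute
```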
| It's needed -- https://github.com/scikit-hep/pyhf/blob/569f51257c6a895508f9c026bd61a2e723cb339c/src/pyhf/schemas/HistFactorySchema.dtd#L105-L112
We do need a test to assert that the exported staterror XML does not have a `Name` attribute. HiFa doesn't support staterror having names. | 2022-03-23T20:46:52 |
scikit-hep/pyhf | 1,837 | scikit-hep__pyhf-1837 | [
"1823"
] | 0693b9fc451f50963dcbcb1b017f73fb80112d7d | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,6 @@
'sphinx-click',
'sphinx_rtd_theme',
'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
- 'Jinja2!=3.1.0', # c.f. https://github.com/spatialaudio/nbsphinx/issues/641
'ipywidgets',
'sphinx-issues',
'sphinx-copybutton>=0.3.2',
| Jinja2 v3.1.0 breaks nbsphinx
### Summary
[`Jinja2` `v3.1.0`](https://pypi.org/project/Jinja2/3.1.0/#history) was released 2 hours ago and is breaking [`nbsphinx`](https://github.com/spatialaudio/nbsphinx) (v0.8.7). This will need to get resolved soon by `nbsphinx`, but as a stopgap we should probably put in a
```diff
'sphinx-click',
'sphinx_rtd_theme',
'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
+ 'Jinja2!=3.1.0',
'ipywidgets',
'sphinx-issues',
'sphinx-copybutton>=0.3.2',
```
until things can get resolved.
### OS / Environment
```console
All
```
### Steps to Reproduce
N/A
### File Upload (optional)
_No response_
### Expected Results
For the docs to build
### Actual Results
nbsphinx errors due to new Jinja2 API
```
...
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 35 source files that are out of date
updating environment: [new config] 153 added, 0 changed, 0 removed
reading sources... [ 0%] _generated/pyhf.compat
reading sources... [ 1%] _generated/pyhf.compat.interpret_rootname
reading sources... [ 1%] _generated/pyhf.compat.paramset_to_rootnames
reading sources... [ 2%] _generated/pyhf.contrib.utils.download
reading sources... [ 3%] _generated/pyhf.contrib.viz.brazil
reading sources... [ 3%] _generated/pyhf.contrib.viz.brazil.BrazilBandCollection
reading sources... [ 4%] _generated/pyhf.contrib.viz.brazil.plot_brazil_band
reading sources... [ 5%] _generated/pyhf.contrib.viz.brazil.plot_cls_components
reading sources... [ 5%] _generated/pyhf.contrib.viz.brazil.plot_results
reading sources... [ 6%] _generated/pyhf.exceptions.ImportBackendError
reading sources... [ 7%] _generated/pyhf.exceptions.InvalidBackend
reading sources... [ 7%] _generated/pyhf.exceptions.InvalidInterpCode
reading sources... [ 8%] _generated/pyhf.exceptions.InvalidMeasurement
reading sources... [ 9%] _generated/pyhf.exceptions.InvalidModel
reading sources... [ 9%] _generated/pyhf.exceptions.InvalidModifier
reading sources... [ 10%] _generated/pyhf.exceptions.InvalidNameReuse
reading sources... [ 11%] _generated/pyhf.exceptions.InvalidOptimizer
reading sources... [ 11%] _generated/pyhf.exceptions.InvalidPatchLookup
reading sources... [ 12%] _generated/pyhf.exceptions.InvalidPatchSet
reading sources... [ 13%] _generated/pyhf.exceptions.InvalidPdfData
reading sources... [ 13%] _generated/pyhf.exceptions.InvalidPdfParameters
reading sources... [ 14%] _generated/pyhf.exceptions.InvalidSpecification
reading sources... [ 15%] _generated/pyhf.exceptions.InvalidWorkspaceOperation
reading sources... [ 15%] _generated/pyhf.exceptions.PatchSetVerificationError
reading sources... [ 16%] _generated/pyhf.get_backend
reading sources... [ 16%] _generated/pyhf.infer.calculators.AsymptoticCalculator
reading sources... [ 17%] _generated/pyhf.infer.calculators.AsymptoticTestStatDistribution
reading sources... [ 18%] _generated/pyhf.infer.calculators.EmpiricalDistribution
reading sources... [ 18%] _generated/pyhf.infer.calculators.HypoTestFitResults
reading sources... [ 19%] _generated/pyhf.infer.calculators.ToyCalculator
reading sources... [ 20%] _generated/pyhf.infer.calculators.generate_asimov_data
reading sources... [ 20%] _generated/pyhf.infer.hypotest
reading sources... [ 21%] _generated/pyhf.infer.intervals.upperlimit
reading sources... [ 22%] _generated/pyhf.infer.mle.fit
reading sources... [ 22%] _generated/pyhf.infer.mle.fixed_poi_fit
reading sources... [ 23%] _generated/pyhf.infer.mle.twice_nll
reading sources... [ 24%] _generated/pyhf.infer.test_statistics.q0
reading sources... [ 24%] _generated/pyhf.infer.test_statistics.qmu
reading sources... [ 25%] _generated/pyhf.infer.test_statistics.qmu_tilde
reading sources... [ 26%] _generated/pyhf.infer.test_statistics.tmu
reading sources... [ 26%] _generated/pyhf.infer.test_statistics.tmu_tilde
reading sources... [ 27%] _generated/pyhf.infer.utils.all_pois_floating
reading sources... [ 28%] _generated/pyhf.infer.utils.create_calculator
reading sources... [ 28%] _generated/pyhf.infer.utils.get_test_stat
reading sources... [ 29%] _generated/pyhf.interpolators.code0
reading sources... [ 30%] _generated/pyhf.interpolators.code1
reading sources... [ 30%] _generated/pyhf.interpolators.code2
reading sources... [ 31%] _generated/pyhf.interpolators.code4
reading sources... [ 32%] _generated/pyhf.interpolators.code4p
reading sources... [ 32%] _generated/pyhf.modifiers.histosys
reading sources... [ 33%] _generated/pyhf.modifiers.histosys.histosys_builder
reading sources... [ 33%] _generated/pyhf.modifiers.histosys.histosys_combined
reading sources... [ 34%] _generated/pyhf.modifiers.histosys.required_parset
reading sources... [ 35%] _generated/pyhf.modifiers.normfactor
reading sources... [ 35%] _generated/pyhf.modifiers.normfactor.normfactor_builder
reading sources... [ 36%] _generated/pyhf.modifiers.normfactor.normfactor_combined
reading sources... [ 37%] _generated/pyhf.modifiers.normfactor.required_parset
reading sources... [ 37%] _generated/pyhf.modifiers.normsys
reading sources... [ 38%] _generated/pyhf.modifiers.normsys.normsys_builder
reading sources... [ 39%] _generated/pyhf.modifiers.normsys.normsys_combined
reading sources... [ 39%] _generated/pyhf.modifiers.normsys.required_parset
reading sources... [ 40%] _generated/pyhf.modifiers.shapefactor
reading sources... [ 41%] _generated/pyhf.modifiers.shapefactor.required_parset
reading sources... [ 41%] _generated/pyhf.modifiers.shapefactor.shapefactor_builder
reading sources... [ 42%] _generated/pyhf.modifiers.shapefactor.shapefactor_combined
reading sources... [ 43%] _generated/pyhf.modifiers.shapesys
reading sources... [ 43%] _generated/pyhf.modifiers.shapesys.required_parset
reading sources... [ 44%] _generated/pyhf.modifiers.shapesys.shapesys_builder
reading sources... [ 45%] _generated/pyhf.modifiers.shapesys.shapesys_combined
reading sources... [ 45%] _generated/pyhf.modifiers.staterror
reading sources... [ 46%] _generated/pyhf.modifiers.staterror.required_parset
reading sources... [ 47%] _generated/pyhf.modifiers.staterror.staterror_builder
reading sources... [ 47%] _generated/pyhf.modifiers.staterror.staterror_combined
reading sources... [ 48%] _generated/pyhf.optimize.mixins.OptimizerMixin
reading sources... [ 49%] _generated/pyhf.optimize.opt_minuit.minuit_optimizer
reading sources... [ 49%] _generated/pyhf.optimize.opt_scipy.scipy_optimizer
reading sources... [ 50%] _generated/pyhf.optimizer
reading sources... [ 50%] _generated/pyhf.patchset.Patch
reading sources... [ 51%] _generated/pyhf.patchset.PatchSet
reading sources... [ 52%] _generated/pyhf.pdf.Model
reading sources... [ 52%] _generated/pyhf.pdf._ModelConfig
reading sources... [ 53%] _generated/pyhf.probability.Independent
reading sources... [ 54%] _generated/pyhf.probability.Normal
reading sources... [ 54%] _generated/pyhf.probability.Poisson
reading sources... [ 55%] _generated/pyhf.probability.Simultaneous
reading sources... [ 56%] _generated/pyhf.readxml
reading sources... [ 56%] _generated/pyhf.readxml.clear_filecache
reading sources... [ 57%] _generated/pyhf.readxml.dedupe_parameters
reading sources... [ 58%] _generated/pyhf.readxml.extract_error
reading sources... [ 58%] _generated/pyhf.readxml.import_root_histogram
reading sources... [ 59%] _generated/pyhf.readxml.parse
reading sources... [ 60%] _generated/pyhf.readxml.process_channel
reading sources... [ 60%] _generated/pyhf.readxml.process_data
reading sources... [ 61%] _generated/pyhf.readxml.process_measurements
reading sources... [ 62%] _generated/pyhf.readxml.process_sample
reading sources... [ 62%] _generated/pyhf.schema
reading sources... [ 63%] _generated/pyhf.schema.Schema
reading sources... [ 64%] _generated/pyhf.schema.load_schema
reading sources... [ 64%] _generated/pyhf.schema.validate
reading sources... [ 65%] _generated/pyhf.set_backend
reading sources... [ 66%] _generated/pyhf.simplemodels.correlated_background
reading sources... [ 66%] _generated/pyhf.simplemodels.uncorrelated_background
reading sources... [ 67%] _generated/pyhf.tensor.jax_backend.jax_backend
reading sources... [ 67%] _generated/pyhf.tensor.numpy_backend.numpy_backend
reading sources... [ 68%] _generated/pyhf.tensor.pytorch_backend.pytorch_backend
reading sources... [ 69%] _generated/pyhf.tensor.tensorflow_backend.tensorflow_backend
reading sources... [ 69%] _generated/pyhf.tensorlib
reading sources... [ 70%] _generated/pyhf.utils.citation
reading sources... [ 71%] _generated/pyhf.utils.digest
reading sources... [ 71%] _generated/pyhf.utils.options_from_eqdelimstring
reading sources... [ 72%] _generated/pyhf.workspace.Workspace
reading sources... [ 73%] _generated/pyhf.writexml
reading sources... [ 73%] _generated/pyhf.writexml.build_channel
reading sources... [ 74%] _generated/pyhf.writexml.build_data
reading sources... [ 75%] _generated/pyhf.writexml.build_measurement
reading sources... [ 75%] _generated/pyhf.writexml.build_modifier
reading sources... [ 76%] _generated/pyhf.writexml.build_sample
reading sources... [ 77%] _generated/pyhf.writexml.indent
reading sources... [ 77%] api
reading sources... [ 78%] babel
reading sources... [ 79%] citations
reading sources... [ 79%] cli
reading sources... [ 80%] contributors
reading sources... [ 81%] development
reading sources... [ 81%] examples
reading sources... [ 82%] examples/notebooks/ShapeFactor
Notebook error:
AttributeError in examples/notebooks/ShapeFactor.ipynb:
module 'jinja2.utils' has no attribute 'escape'
make: *** [Makefile:57: html] Error 2
```
### pyhf Version
`master` on commit 419bc766791aa18226777910002bd91c695d0465
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-04-03T22:20:32 |
||
scikit-hep/pyhf | 1,841 | scikit-hep__pyhf-1841 | [
"1725"
] | e8cb58136769d8a103a4ebc0cbfe0756d8ece3fb | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,8 @@
'pytest-console-scripts',
'pytest-mpl',
'pydocstyle',
- 'papermill~=2.0',
- 'nteract-scrapbook~=0.2',
+ 'papermill~=2.3.4',
+ 'scrapbook~=0.5.0',
'jupyter',
'graphviz',
]
| diff --git a/tests/test_notebooks.py b/tests/test_notebooks.py
--- a/tests/test_notebooks.py
+++ b/tests/test_notebooks.py
@@ -1,8 +1,9 @@
import sys
-import os
+from pathlib import Path
+
import papermill as pm
-import scrapbook as sb
import pytest
+import scrapbook as sb
@pytest.fixture()
@@ -11,6 +12,7 @@ def common_kwargs(tmpdir):
return {
'output_path': str(outputnb),
'kernel_name': f'python{sys.version_info.major}',
+ 'progress_bar': False,
}
@@ -19,17 +21,19 @@ def test_hello_world(common_kwargs):
def test_xml_importexport(common_kwargs):
+ # Change directories to make users not have to worry about paths to follow example
+ execution_dir = Path.cwd() / "docs" / "examples" / "notebooks"
pm.execute_notebook(
- 'docs/examples/notebooks/XML_ImportExport.ipynb', **common_kwargs
+ execution_dir / "XML_ImportExport.ipynb", cwd=execution_dir, **common_kwargs
)
def test_statisticalanalysis(common_kwargs):
# The Binder example uses specific relative paths
- cwd = os.getcwd()
- os.chdir(os.path.join(cwd, 'docs/examples/notebooks/binderexample'))
- pm.execute_notebook('StatisticalAnalysis.ipynb', **common_kwargs)
- os.chdir(cwd)
+ execution_dir = Path.cwd() / "docs" / "examples" / "notebooks" / "binderexample"
+ pm.execute_notebook(
+ execution_dir / "StatisticalAnalysis.ipynb", cwd=execution_dir, **common_kwargs
+ )
def test_shapefactor(common_kwargs):
@@ -39,7 +43,7 @@ def test_shapefactor(common_kwargs):
def test_multichannel_coupled_histos(common_kwargs):
pm.execute_notebook(
'docs/examples/notebooks/multichannel-coupled-histo.ipynb',
- parameters={'validation_datadir': 'validation/data'},
+ parameters={"validation_datadir": str(Path.cwd() / "validation" / "data")},
**common_kwargs,
)
@@ -47,7 +51,7 @@ def test_multichannel_coupled_histos(common_kwargs):
def test_multibinpois(common_kwargs):
pm.execute_notebook(
'docs/examples/notebooks/multiBinPois.ipynb',
- parameters={'validation_datadir': 'validation/data'},
+ parameters={"validation_datadir": str(Path.cwd() / "validation" / "data")},
**common_kwargs,
)
nb = sb.read_notebook(common_kwargs['output_path'])
@@ -55,19 +59,17 @@ def test_multibinpois(common_kwargs):
def test_pullplot(common_kwargs):
- # Change directories to make users not have to worry about paths to follow example
- cwd = os.getcwd()
- os.chdir(os.path.join(cwd, "docs/examples/notebooks"))
- pm.execute_notebook("pullplot.ipynb", **common_kwargs)
- os.chdir(cwd)
+ execution_dir = Path.cwd() / "docs" / "examples" / "notebooks"
+ pm.execute_notebook(
+ execution_dir / "pullplot.ipynb", cwd=execution_dir, **common_kwargs
+ )
def test_impactplot(common_kwargs):
- # Change directories to make users not have to worry about paths to follow example
- cwd = os.getcwd()
- os.chdir(os.path.join(cwd, "docs/examples/notebooks"))
- pm.execute_notebook("ImpactPlot.ipynb", **common_kwargs)
- os.chdir(cwd)
+ execution_dir = Path.cwd() / "docs" / "examples" / "notebooks"
+ pm.execute_notebook(
+ execution_dir / "ImpactPlot.ipynb", cwd=execution_dir, **common_kwargs
+ )
def test_toys(common_kwargs):
| Update `test` dependency from `nteract-scrapbook` to `scrapbook`
### Summary
Running the notebook tests generates the warning
```pytb
warnings.warn("'nteract-scrapbook' package has been renamed to `scrapbook`. No new releases are going out for this old package name.", FutureWarning)
```
as [`nteract-scrapbook`](https://pypi.org/project/nteract-scrapbook/) is now [`scrapbook`](https://pypi.org/project/scrapbook/). All that needs to be done is to change the name used in `steup.py` for the `test` extra:
https://github.com/scikit-hep/pyhf/blob/29bc6daed55b40711fabd9b22d3e76f9ee15657d/setup.py#L42
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-04-06T08:02:12 |
|
scikit-hep/pyhf | 1,855 | scikit-hep__pyhf-1855 | [
"1853"
] | ae0823875988b3fac77758d8d2ec4d8616ebd8e4 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -462,3 +462,7 @@ def setup(app):
}
},
}
+
+# c.f. https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder
+linkcheck_ignore = ['cli.html#pyhf-xml2json']
+linkcheck_retries = 50
| Add linkcheck to docs workflows
With the addition of [user-defined build jobs in ReadTheDocs](https://twitter.com/readthedocs/status/1519363742869295105?s=11&t=5-u_2BFwXLAj9IyXQLhIVA), I noticed that one of their examples was to [perform a check for broken links](https://docs.readthedocs.io/en/latest/build-customization.html#perform-a-check-for-broken-links) with `sphinx`'s `linkcheck`.
I'm working on adding this both to the ReadTheDocs config and to the docs GHA workflow, but at the moment
```console
$ cd docs
$ make linkcheck
```
is giving a failure
```
( babel: line 3) broken cli.html#pyhf-xml2json -
```
on
https://github.com/scikit-hep/pyhf/blob/e7996e5ba350a48825d9736ccc81ca8e3009dd3c/docs/babel.rst?plain=1#L5
I'm not quite sure why, as this is a valid link once the source is built, but I think it might be a form of https://github.com/sphinx-doc/sphinx/issues/9383.
I have this and other fixes on a branch named `docs/use-read-the-docs-pre-build-job`.
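For the Sphinx side, the workaround amounts to telling the `linkcheck` builder to skip that one anchor (and to retry flaky external links) — e.g. in `docs/conf.py`, along the lines of what the patch above adds:

```python
# docs/conf.py -- sketch: skip the anchor linkcheck cannot resolve before the
# HTML is built, and retry flaky external links
linkcheck_ignore = ["cli.html#pyhf-xml2json"]
linkcheck_retries = 50
```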
| 2022-04-28T04:51:05 |
||
scikit-hep/pyhf | 1,865 | scikit-hep__pyhf-1865 | [
"1864"
] | de4ce07ea9ca703c23411624a1e9272751fe269d | diff --git a/src/pyhf/cli/rootio.py b/src/pyhf/cli/rootio.py
--- a/src/pyhf/cli/rootio.py
+++ b/src/pyhf/cli/rootio.py
@@ -29,7 +29,8 @@ def cli():
default=None,
)
@click.option('--track-progress/--hide-progress', default=True)
-def xml2json(entrypoint_xml, basedir, output_file, track_progress):
[email protected]('--validation-as-error/--validation-as-warning', default=True)
+def xml2json(entrypoint_xml, basedir, output_file, track_progress, validation_as_error):
"""Entrypoint XML: The top-level XML file for the PDF definition."""
try:
import uproot
@@ -43,7 +44,12 @@ def xml2json(entrypoint_xml, basedir, output_file, track_progress):
)
from pyhf import readxml
- spec = readxml.parse(entrypoint_xml, basedir, track_progress=track_progress)
+ spec = readxml.parse(
+ entrypoint_xml,
+ basedir,
+ track_progress=track_progress,
+ validation_as_error=validation_as_error,
+ )
if output_file is None:
click.echo(json.dumps(spec, indent=4, sort_keys=True))
else:
diff --git a/src/pyhf/readxml.py b/src/pyhf/readxml.py
--- a/src/pyhf/readxml.py
+++ b/src/pyhf/readxml.py
@@ -1,5 +1,6 @@
from pyhf import schema
from pyhf import compat
+from pyhf import exceptions
import logging
@@ -333,7 +334,7 @@ def dedupe_parameters(parameters):
return list({v['name']: v for v in parameters}.values())
-def parse(configfile, rootdir, track_progress=False):
+def parse(configfile, rootdir, track_progress=False, validation_as_error=True):
toplvl = ET.parse(configfile)
inputs = tqdm.tqdm(
[x.text for x in toplvl.findall('Input')],
@@ -366,8 +367,13 @@ def parse(configfile, rootdir, track_progress=False):
],
'version': schema.version,
}
- schema.validate(result, 'workspace.json')
-
+ try:
+ schema.validate(result, 'workspace.json')
+ except exceptions.InvalidSpecification as exc:
+ if validation_as_error:
+ raise exc
+ else:
+ log.warning(exc)
return result
| diff --git a/tests/test_import.py b/tests/test_import.py
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -6,6 +6,7 @@
import pytest
import xml.etree.ElementTree as ET
import logging
+from jsonschema import ValidationError
def assert_equal_dictionary(d1, d2):
@@ -448,3 +449,28 @@ def test_process_modifiers(mocker, caplog):
assert {'name': 'staterror_myChannel', 'type': 'staterror', 'data': _err} in result[
'modifiers'
]
+
+
+def test_import_validation_exception(mocker, caplog):
+
+ mocker.patch(
+ 'pyhf.schema.validate',
+ side_effect=pyhf.exceptions.InvalidSpecification(
+ ValidationError('this is an invalid specification')
+ ),
+ )
+
+ with caplog.at_level(logging.WARNING, "pyhf.readxml"):
+ pyhf.readxml.parse(
+ 'validation/xmlimport_input2/config/example.xml',
+ 'validation/xmlimport_input2',
+ validation_as_error=False,
+ )
+ assert "this is an invalid specification" in caplog.text
+
+ with pytest.raises(pyhf.exceptions.InvalidSpecification):
+ pyhf.readxml.parse(
+ 'validation/xmlimport_input2/config/example.xml',
+ 'validation/xmlimport_input2',
+ validation_as_error=True,
+ )
| Skip schema validation for xml2json
### Summary
Sometimes upstream tools like HistFitter can produce non-valid XML workspaces, but we need to be able to convert them to JSON. Schema validation could be configured off in some of these cases (force the error to a warning instead).
### Additional Information
_No response_
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-05-23T17:53:16 |
|
scikit-hep/pyhf | 1,917 | scikit-hep__pyhf-1917 | [
"1916"
] | e5bfdd089a57bb5883f2896b247b315bbc3c8a9c | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,6 +40,7 @@
'scrapbook~=0.5.0',
'jupyter',
'graphviz',
+ 'pytest-socket>=0.2.0', # c.f. PR #1917
]
)
)
diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py
--- a/src/pyhf/schema/__init__.py
+++ b/src/pyhf/schema/__init__.py
@@ -72,6 +72,8 @@ def __call__(self, new_path: pathlib.Path):
self (pyhf.schema.Schema): Returns itself (for contextlib management)
"""
self.orig_path, variables.schemas = variables.schemas, new_path
+ self.orig_cache = dict(variables.SCHEMA_CACHE)
+ variables.SCHEMA_CACHE.clear()
return self
def __enter__(self):
@@ -85,6 +87,7 @@ def __exit__(self, *args, **kwargs):
None
"""
variables.schemas = self.orig_path
+ variables.SCHEMA_CACHE = self.orig_cache
@property
def path(self):
diff --git a/src/pyhf/schema/loader.py b/src/pyhf/schema/loader.py
--- a/src/pyhf/schema/loader.py
+++ b/src/pyhf/schema/loader.py
@@ -39,3 +39,9 @@ def load_schema(schema_id: str):
schema = json.load(json_schema)
variables.SCHEMA_CACHE[schema['$id']] = schema
return variables.SCHEMA_CACHE[schema['$id']]
+
+
+# pre-populate the cache to avoid network access
+# on first validation in standard usage
+# (not in pyhf.schema.variables to avoid circular imports)
+load_schema(f'{variables.SCHEMA_VERSION}/defs.json')
| diff --git a/tests/test_schema.py b/tests/test_schema.py
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -1,6 +1,11 @@
-import pyhf
-import pytest
+import importlib
import json
+import sys
+
+import pytest
+from pytest_socket import socket_disabled # noqa: F401
+
+import pyhf
@pytest.mark.parametrize('version', ['1.0.0'])
@@ -27,11 +32,20 @@ def test_schema_callable():
assert callable(pyhf.schema)
-def test_schema_changeable(datadir, monkeypatch):
[email protected]
+def self_restoring_schema_globals():
+ old_path = pyhf.schema.path
+ old_cache = dict(pyhf.schema.variables.SCHEMA_CACHE)
+ yield old_path, old_cache
+ pyhf.schema(old_path)
+ pyhf.schema.variables.SCHEMA_CACHE = old_cache
+
+
+def test_schema_changeable(datadir, monkeypatch, self_restoring_schema_globals):
monkeypatch.setattr(
pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
)
- old_path = pyhf.schema.path
+ old_path, old_cache = self_restoring_schema_globals
new_path = datadir / 'customschema'
with pytest.raises(pyhf.exceptions.SchemaNotFound):
@@ -40,36 +54,47 @@ def test_schema_changeable(datadir, monkeypatch):
pyhf.schema(new_path)
assert old_path != pyhf.schema.path
assert new_path == pyhf.schema.path
+ assert pyhf.schema.variables.SCHEMA_CACHE is not old_cache
+ assert len(pyhf.schema.variables.SCHEMA_CACHE) == 0
assert pyhf.Workspace(json.load(open(new_path / 'custom.json')))
- pyhf.schema(old_path)
+ assert len(pyhf.schema.variables.SCHEMA_CACHE) == 1
-def test_schema_changeable_context(datadir, monkeypatch):
+def test_schema_changeable_context(datadir, monkeypatch, self_restoring_schema_globals):
monkeypatch.setattr(
pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
)
- old_path = pyhf.schema.path
+ old_path, old_cache = self_restoring_schema_globals
new_path = datadir / 'customschema'
assert old_path == pyhf.schema.path
with pyhf.schema(new_path):
assert old_path != pyhf.schema.path
assert new_path == pyhf.schema.path
+ assert pyhf.schema.variables.SCHEMA_CACHE is not old_cache
+ assert len(pyhf.schema.variables.SCHEMA_CACHE) == 0
assert pyhf.Workspace(json.load(open(new_path / 'custom.json')))
+ assert len(pyhf.schema.variables.SCHEMA_CACHE) == 1
assert old_path == pyhf.schema.path
+ assert old_cache == pyhf.schema.variables.SCHEMA_CACHE
-def test_schema_changeable_context_error(datadir, monkeypatch):
+def test_schema_changeable_context_error(
+ datadir, monkeypatch, self_restoring_schema_globals
+):
monkeypatch.setattr(
pyhf.schema.variables, 'schemas', pyhf.schema.variables.schemas, raising=True
)
- old_path = pyhf.schema.path
+ old_path, old_cache = self_restoring_schema_globals
new_path = datadir / 'customschema'
with pytest.raises(ZeroDivisionError):
with pyhf.schema(new_path):
+ # this populates the current cache
+ pyhf.Workspace(json.load(open(new_path / 'custom.json')))
raise ZeroDivisionError()
assert old_path == pyhf.schema.path
+ assert old_cache == pyhf.schema.variables.SCHEMA_CACHE
def test_no_channels():
@@ -567,3 +592,48 @@ def test_patchset_fail(datadir, patchset_file):
patchset = json.load(open(datadir.joinpath(patchset_file)))
with pytest.raises(pyhf.exceptions.InvalidSpecification):
pyhf.schema.validate(patchset, 'patchset.json')
+
+
+def test_defs_always_cached(
+ socket_disabled, # noqa: F811
+ isolate_modules,
+):
+ """
+ Schema definitions should always be loaded from the local files and cached at first import.
+
+ Otherwise pyhf will crash in contexts where the jsonschema.RefResolver cannot lookup the definition by the schema-id
+ (e.g. a cluster node without network access).
+ """
+ modules_to_clear = [name for name in sys.modules if name.split('.')[0] == 'pyhf']
+ for module_name in modules_to_clear:
+ del sys.modules[module_name]
+ pyhf = importlib.import_module('pyhf')
+
+ spec = {
+ 'channels': [
+ {
+ 'name': 'singlechannel',
+ 'samples': [
+ {
+ 'name': 'signal',
+ 'data': [10],
+ 'modifiers': [
+ {'name': 'mu', 'type': 'normfactor', 'data': None}
+ ],
+ },
+ {
+ 'name': 'background',
+ 'data': [20],
+ 'modifiers': [
+ {
+ 'name': 'uncorr_bkguncrt',
+ 'type': 'shapesys',
+ 'data': [30],
+ }
+ ],
+ },
+ ],
+ }
+ ]
+ }
+ pyhf.schema.validate(spec, 'model.json') # may try to access network and fail
| Schema validation crashes when running in an environment without internet access
### Summary
In master and the 0.7.0 release candidate, pyhf operations involving model validation will crash in offline environments with a RefResolutionError. This is a common situation e.g. with worker nodes on HTC clusters.
The bug was introduced after 0.6.3, I think in #1753 where the [pre-loading was dropped](https://github.com/scikit-hep/pyhf/pull/1753/files#diff-01a944844c3739d996c27da33c727473ec48ebcac65f16b4001384bc3ae4e725L48).
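Presumably the fix is to restore that eager loading, i.e. populate the schema cache from the files shipped with the package so the `RefResolver` never needs the network for the `$id` URL. A sketch of what that amounts to (essentially what the patch above reintroduces at import time):

```python
# pre-populate pyhf's schema cache from the packaged defs.json
from pyhf.schema import variables
from pyhf.schema.loader import load_schema

load_schema(f"{variables.SCHEMA_VERSION}/defs.json")
print(list(variables.SCHEMA_CACHE))  # the defs.json $id is now resolvable offline
```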
### OS / Environment
```console
NAME="CentOS Linux"
VERSION="7 (Core)"
ID="centos"
ID_LIKE="rhel fedora"
VERSION_ID="7"
PRETTY_NAME="CentOS Linux 7 (Core)"
ANSI_COLOR="0;31"
CPE_NAME="cpe:/o:centos:centos:7"
HOME_URL="http://cern.ch/linux/"
BUG_REPORT_URL="http://cern.ch/linux/"
CENTOS_MANTISBT_PROJECT="CentOS-7"
CENTOS_MANTISBT_PROJECT_VERSION="7"
REDHAT_SUPPORT_PRODUCT="centos"
REDHAT_SUPPORT_PRODUCT_VERSION="7"
```
### Steps to Reproduce
I don't know a good way to prepare the environment to demonstrate this.
But the test below exposes the attempt by the RefResolver to resolve the schema id through the https URL; it fails against the release candidate/master but passes with 0.6.3.
<!--- Paste your minimal failing Python example code between the quotes below -->
```python (paste below)
from functools import partial
import pytest
import jsonschema
import pyhf
def make_asserting_handler(origin):
def asserting_handler(*args, **kwargs):
raise AssertionError(
f'called URL request handler from {origin} with args={args!r}, kwargs={kwargs!r} '
'when no call should have been needed'
)
return asserting_handler
@pytest.fixture
def no_http_jsonschema_ref_resolving(monkeypatch):
asserting_handler = make_asserting_handler('handlers')
handlers = {
'https': asserting_handler,
'http': asserting_handler,
}
WrappedResolver = partial(jsonschema.RefResolver, handlers=handlers)
monkeypatch.setattr('jsonschema.RefResolver', WrappedResolver, raising=True)
def test_preloaded_cache(
no_http_jsonschema_ref_resolving,
):
spec = {
'channels': [
{
'name': 'singlechannel',
'samples': [
{
'name': 'signal',
'data': [10],
'modifiers': [
{'name': 'mu', 'type': 'normfactor', 'data': None}
],
},
{
'name': 'background',
'data': [20],
'modifiers': [
{
'name': 'uncorr_bkguncrt',
'type': 'shapesys',
'data': [30],
}
],
},
],
}
]
}
try:
pyhf.schema.validate(spec, 'model.json')
except AttributeError:
pyhf.utils.validate(spec, 'model.json')
```
### File Upload (optional)
_No response_
### Expected Results
I expect schema validation to succeed without crashing even when there is no network access that allows resolving the https schema-ids.
### Actual Results
```console
jsonschema.exceptions.RefResolutionError: HTTPSConnectionPool(host='scikit-hep.org', port=443): Max retries exceeded with url: /pyhf/schemas/1.0.0/defs.json (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x2b2bb8457c40>: Failed to establish a new connection: [Errno 101] Network is unreachable'))
```
### pyhf Version
```console
pyhf, version 0.7.0rc2
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-07-06T12:38:55 |
|
scikit-hep/pyhf | 1,919 | scikit-hep__pyhf-1919 | [
"1918"
] | a4d515464cbc06e0ead8463fac5c96cb7b39022c | diff --git a/src/pyhf/optimize/mixins.py b/src/pyhf/optimize/mixins.py
--- a/src/pyhf/optimize/mixins.py
+++ b/src/pyhf/optimize/mixins.py
@@ -1,9 +1,11 @@
"""Helper Classes for use of automatic differentiation."""
-from pyhf.tensor.manager import get_backend
+import logging
+
+import numpy as np
+
from pyhf import exceptions
from pyhf.optimize.common import shim
-
-import logging
+from pyhf.tensor.manager import get_backend
log = logging.getLogger(__name__)
@@ -82,6 +84,13 @@ def _internal_postprocess(self, fitresult, stitch_pars, return_uncertainties=Fal
if uncertainties is not None:
# extract number of fixed parameters
num_fixed_pars = len(fitted_pars) - len(fitresult.x)
+
+ # FIXME: Set uncertainties for fixed parameters to 0 manually
+ # https://github.com/scikit-hep/iminuit/issues/762
+ # https://github.com/scikit-hep/pyhf/issues/1918
+ # https://github.com/scikit-hep/cabinetry/pull/346
+ uncertainties = np.where(fitresult.minuit.fixed, 0.0, uncertainties)
+
# stitch in zero-uncertainty for fixed values
uncertainties = stitch_pars(
tensorlib.astensor(uncertainties),
diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -50,14 +50,12 @@ def _get_minimizer(
par_names=None,
):
- step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]
fixed_vals = fixed_vals or []
# Minuit wants True/False for each parameter
fixed_bools = [False] * len(init_pars)
for index, val in fixed_vals:
fixed_bools[index] = True
init_pars[index] = val
- step_sizes[index] = 0.0
# Minuit requires jac=callable
if do_grad:
@@ -68,7 +66,6 @@ def _get_minimizer(
jac = None
minuit = iminuit.Minuit(wrapped_objective, init_pars, grad=jac, name=par_names)
- minuit.errors = step_sizes
minuit.limits = init_bounds
minuit.fixed = fixed_bools
minuit.print_level = self.verbose
| diff --git a/tests/test_optim.py b/tests/test_optim.py
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -520,20 +520,6 @@ def test_init_pars_sync_fixed_values_minuit(mocker):
assert minimizer.fixed == [True, False, False]
-def test_step_sizes_fixed_parameters_minuit(mocker):
- opt = pyhf.optimize.minuit_optimizer()
-
- # patch all we need
- from pyhf.optimize import opt_minuit
-
- minuit = mocker.patch.object(getattr(opt_minuit, 'iminuit'), 'Minuit')
- minimizer = opt._get_minimizer(None, [9, 9, 9], [(0, 10)] * 3, fixed_vals=[(0, 1)])
-
- assert minuit.called
- assert minimizer.fixed == [True, False, False]
- assert minimizer.errors == [0.0, 0.01, 0.01]
-
-
def test_solver_options_behavior_scipy(mocker):
opt = pyhf.optimize.scipy_optimizer(solver_options={'arbitrary_option': 'foobar'})
| CI failing for `iminuit` `v2.12.2`
### Summary
On 2022-07-15 [`iminuit` `v2.12.2`](https://github.com/scikit-hep/iminuit/releases/tag/v2.12.2) was released to "fix bug in error heuristic and prevent assigning non-positive values". With this release the CI is failing for `tests/test_infer.py`, `tests/test_optim.py`, `tests/test_jit.py`, `tests/test_public_api.py`, `tests/test_scripts.py`, and `tests/test_validation.py`.
### OS / Environment
```console
All
```
### Steps to Reproduce
Run the test suite in CI
### File Upload (optional)
_No response_
### Expected Results
CI to pass
### Actual Results
```console
All the above tests fail.
```
### pyhf Version
```console
`HEAD` which is currently 8e242c9c829963c714f9b10acd28494f004a2452
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| I saw the same for `cabinetry`, presumably due to https://github.com/scikit-hep/iminuit/issues/762. | 2022-07-17T15:55:24 |
scikit-hep/pyhf | 1,934 | scikit-hep__pyhf-1934 | [
"1285"
] | 3295d86720a86f6280aba919b38dd252be6bc40d | diff --git a/src/pyhf/readxml.py b/src/pyhf/readxml.py
--- a/src/pyhf/readxml.py
+++ b/src/pyhf/readxml.py
@@ -1,8 +1,20 @@
from __future__ import annotations
import logging
-import os
-from typing import TYPE_CHECKING, Callable, Iterable, Tuple, Union, IO
+from typing import (
+ IO,
+ Callable,
+ Iterable,
+ List,
+ MutableMapping,
+ MutableSequence,
+ Sequence,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
import xml.etree.ElementTree as ET
from pathlib import Path
@@ -13,16 +25,32 @@
from pyhf import compat
from pyhf import exceptions
from pyhf import schema
+from pyhf.typing import (
+ Channel,
+ HistoSys,
+ LumiSys,
+ Measurement,
+ Modifier,
+ NormFactor,
+ NormSys,
+ Observation,
+ Parameter,
+ ParameterBase,
+ PathOrStr,
+ Sample,
+ ShapeFactor,
+ ShapeSys,
+ StatError,
+ Workspace,
+)
log = logging.getLogger(__name__)
-if TYPE_CHECKING:
- PathOrStr = Union[str, os.PathLike[str]]
-else:
- PathOrStr = Union[str, "os.PathLike[str]"]
-
-__FILECACHE__ = {}
+FileCacheType = MutableMapping[str, Tuple[Union[IO[str], IO[bytes]], Set[str]]]
MountPathType = Iterable[Tuple[Path, Path]]
+ResolverType = Callable[[str], Path]
+
+__FILECACHE__: FileCacheType = {}
__all__ = [
"clear_filecache",
@@ -37,11 +65,11 @@
]
-def __dir__():
+def __dir__() -> list[str]:
return __all__
-def resolver_factory(rootdir: Path, mounts: MountPathType) -> Callable[[str], Path]:
+def resolver_factory(rootdir: Path, mounts: MountPathType) -> ResolverType:
def resolver(filename: str) -> Path:
path = Path(filename)
for host_path, mount_path in mounts:
@@ -55,7 +83,7 @@ def resolver(filename: str) -> Path:
return resolver
-def extract_error(hist):
+def extract_error(hist: uproot.behaviors.TH1.TH1) -> list[float]:
"""
Determine the bin uncertainties for a histogram.
@@ -71,10 +99,16 @@ def extract_error(hist):
"""
variance = hist.variances() if hist.weighted else hist.to_numpy()[0]
- return np.sqrt(variance).tolist()
+ return cast(List[float], np.sqrt(variance).tolist())
-def import_root_histogram(resolver, filename, path, name, filecache=None):
+def import_root_histogram(
+ resolver: ResolverType,
+ filename: str,
+ path: str,
+ name: str,
+ filecache: FileCacheType | None = None,
+) -> tuple[list[float], list[float]]:
global __FILECACHE__
filecache = filecache or __FILECACHE__
@@ -103,21 +137,25 @@ def import_root_histogram(resolver, filename, path, name, filecache=None):
def process_sample(
- sample, resolver, inputfile, histopath, channel_name, track_progress=False
-):
- if 'InputFile' in sample.attrib:
- inputfile = sample.attrib.get('InputFile')
- if 'HistoPath' in sample.attrib:
- histopath = sample.attrib.get('HistoPath')
+ sample: ET.Element,
+ resolver: ResolverType,
+ inputfile: str,
+ histopath: str,
+ channel_name: str,
+ track_progress: bool = False,
+) -> Sample:
+ inputfile = sample.attrib.get('InputFile', inputfile)
+ histopath = sample.attrib.get('HistoPath', histopath)
histoname = sample.attrib['HistoName']
data, err = import_root_histogram(resolver, inputfile, histopath, histoname)
- parameter_configs = []
- modifiers = []
+ parameter_configs: MutableSequence[Parameter] = []
+ modifiers: MutableSequence[Modifier] = []
# first check if we need to add lumi modifier for this sample
if sample.attrib.get("NormalizeByTheory", "False") == 'True':
- modifiers.append({'name': 'lumi', 'type': 'lumi', 'data': None})
+ modifier_lumi: LumiSys = {'name': 'lumi', 'type': 'lumi', 'data': None}
+ modifiers.append(modifier_lumi)
modtags = tqdm.tqdm(
sample.iter(), unit='modifier', disable=not (track_progress), total=len(sample)
@@ -130,21 +168,23 @@ def process_sample(
if modtag == sample:
continue
if modtag.tag == 'OverallSys':
- modifiers.append(
- {
- 'name': modtag.attrib['Name'],
- 'type': 'normsys',
- 'data': {
- 'lo': float(modtag.attrib['Low']),
- 'hi': float(modtag.attrib['High']),
- },
- }
- )
+ modifier_normsys: NormSys = {
+ 'name': modtag.attrib['Name'],
+ 'type': 'normsys',
+ 'data': {
+ 'lo': float(modtag.attrib['Low']),
+ 'hi': float(modtag.attrib['High']),
+ },
+ }
+ modifiers.append(modifier_normsys)
elif modtag.tag == 'NormFactor':
- modifiers.append(
- {'name': modtag.attrib['Name'], 'type': 'normfactor', 'data': None}
- )
- parameter_config = {
+ modifier_normfactor: NormFactor = {
+ 'name': modtag.attrib['Name'],
+ 'type': 'normfactor',
+ 'data': None,
+ }
+ modifiers.append(modifier_normfactor)
+ parameter_config: Parameter = {
'name': modtag.attrib['Name'],
'bounds': [[float(modtag.attrib['Low']), float(modtag.attrib['High'])]],
'inits': [float(modtag.attrib['Val'])],
@@ -166,13 +206,12 @@ def process_sample(
modtag.attrib.get('HistoPathHigh', ''),
modtag.attrib['HistoNameHigh'],
)
- modifiers.append(
- {
- 'name': modtag.attrib['Name'],
- 'type': 'histosys',
- 'data': {'lo_data': lo, 'hi_data': hi},
- }
- )
+ modifier_histosys: HistoSys = {
+ 'name': modtag.attrib['Name'],
+ 'type': 'histosys',
+ 'data': {'lo_data': lo, 'hi_data': hi},
+ }
+ modifiers.append(modifier_histosys)
elif modtag.tag == 'StatError' and modtag.attrib['Activate'] == 'True':
if modtag.attrib.get('HistoName', '') == '':
staterr = err
@@ -186,13 +225,12 @@ def process_sample(
staterr = np.multiply(extstat, data).tolist()
if not staterr:
raise RuntimeError('cannot determine stat error.')
- modifiers.append(
- {
- 'name': f'staterror_{channel_name}',
- 'type': 'staterror',
- 'data': staterr,
- }
- )
+ modifier_staterror: StatError = {
+ 'name': f'staterror_{channel_name}',
+ 'type': 'staterror',
+ 'data': staterr,
+ }
+ modifiers.append(modifier_staterror)
elif modtag.tag == 'ShapeSys':
# NB: ConstraintType is ignored
if modtag.attrib.get('ConstraintType', 'Poisson') != 'Poisson':
@@ -207,17 +245,19 @@ def process_sample(
modtag.attrib['HistoName'],
)
# NB: we convert relative uncertainty to absolute uncertainty
- modifiers.append(
- {
- 'name': modtag.attrib['Name'],
- 'type': 'shapesys',
- 'data': [a * b for a, b in zip(data, shapesys_data)],
- }
- )
+ modifier_shapesys: ShapeSys = {
+ 'name': modtag.attrib['Name'],
+ 'type': 'shapesys',
+ 'data': [a * b for a, b in zip(data, shapesys_data)],
+ }
+ modifiers.append(modifier_shapesys)
elif modtag.tag == 'ShapeFactor':
- modifiers.append(
- {'name': modtag.attrib['Name'], 'type': 'shapefactor', 'data': None}
- )
+ modifier_shapefactor: ShapeFactor = {
+ 'name': modtag.attrib['Name'],
+ 'type': 'shapefactor',
+ 'data': None,
+ }
+ modifiers.append(modifier_shapefactor)
else:
log.warning('not considering modifier tag %s', modtag)
@@ -229,22 +269,27 @@ def process_sample(
}
-def process_data(sample, resolver, inputfile, histopath):
- if 'InputFile' in sample.attrib:
- inputfile = sample.attrib.get('InputFile')
- if 'HistoPath' in sample.attrib:
- histopath = sample.attrib.get('HistoPath')
+def process_data(
+ sample: ET.Element,
+ resolver: ResolverType,
+ inputfile: str,
+ histopath: str,
+) -> list[float]:
+ inputfile = sample.attrib.get('InputFile', inputfile)
+ histopath = sample.attrib.get('HistoPath', histopath)
histoname = sample.attrib['HistoName']
data, _ = import_root_histogram(resolver, inputfile, histopath, histoname)
return data
-def process_channel(channelxml, resolver, track_progress=False):
+def process_channel(
+ channelxml: ET.ElementTree, resolver: ResolverType, track_progress: bool = False
+) -> tuple[str, list[float], list[Sample], list[Parameter]]:
channel = channelxml.getroot()
- inputfile = channel.attrib.get('InputFile')
- histopath = channel.attrib.get('HistoPath')
+ inputfile = channel.attrib.get('InputFile', '')
+ histopath = channel.attrib.get('HistoPath', '')
samples = tqdm.tqdm(
channel.findall('Sample'), unit='sample', disable=not (track_progress)
@@ -259,7 +304,7 @@ def process_channel(channelxml, resolver, track_progress=False):
raise RuntimeError(f"Channel {channel_name} is missing data. See issue #1911.")
results = []
- channel_parameter_configs = []
+ channel_parameter_configs: list[Parameter] = []
for sample in samples:
samples.set_description(f" - sample {sample.attrib.get('Name')}")
result = process_sample(
@@ -271,7 +316,10 @@ def process_channel(channelxml, resolver, track_progress=False):
return channel_name, parsed_data, results, channel_parameter_configs
-def process_measurements(toplvl, other_parameter_configs=None):
+def process_measurements(
+ toplvl: ET.ElementTree,
+ other_parameter_configs: Sequence[Parameter] | None = None,
+) -> list[Measurement]:
"""
For a given XML structure, provide a parsed dictionary adhering to defs.json/#definitions/measurement.
@@ -283,11 +331,11 @@ def process_measurements(toplvl, other_parameter_configs=None):
:obj:`dict`: A measurement object.
"""
- results = []
+ results: list[Measurement] = []
other_parameter_configs = other_parameter_configs if other_parameter_configs else []
for x in toplvl.findall('Measurement'):
- parameter_configs_map = {k['name']: dict(**k) for k in other_parameter_configs}
+ parameter_configs_map: MutableMapping[str, Parameter] = {k['name']: dict(**k) for k in other_parameter_configs} # type: ignore[misc]
lumi = float(x.attrib['Lumi'])
lumierr = lumi * float(x.attrib['LumiRelErr'])
@@ -299,7 +347,7 @@ def process_measurements(toplvl, other_parameter_configs=None):
f"Measurement {measurement_name} is missing POI specification"
)
- result = {
+ result: Measurement = {
'name': measurement_name,
'config': {
'poi': poi.text.strip() if poi.text else '',
@@ -317,7 +365,7 @@ def process_measurements(toplvl, other_parameter_configs=None):
for param in x.findall('ParamSetting'):
# determine what all parameters in the paramsetting have in common
- overall_param_obj = {}
+ overall_param_obj: ParameterBase = {}
if param.attrib.get('Const'):
overall_param_obj['fixed'] = param.attrib['Const'] == 'True'
if param.attrib.get('Val'):
@@ -326,21 +374,21 @@ def process_measurements(toplvl, other_parameter_configs=None):
# might be specifying multiple parameters in the same ParamSetting
if param.text:
for param_name in param.text.strip().split(' '):
- param_interpretation = compat.interpret_rootname(param_name)
+ param_interpretation = compat.interpret_rootname(param_name) # type: ignore[no-untyped-call]
if not param_interpretation['is_scalar']:
raise ValueError(
f'pyhf does not support setting non-scalar parameters ("gammas") constant, such as for {param_name}.'
)
if param_interpretation['name'] == 'lumi':
- result['config']['parameters'][0].update(overall_param_obj)
+ result['config']['parameters'][0].update(overall_param_obj) # type: ignore[typeddict-item]
else:
# pop from parameter_configs_map because we don't want to duplicate
- param_obj = parameter_configs_map.pop(
+ param_obj: Parameter = parameter_configs_map.pop(
param_interpretation['name'],
{'name': param_interpretation['name']},
)
# ParamSetting will always take precedence
- param_obj.update(overall_param_obj)
+ param_obj.update(overall_param_obj) # type: ignore[typeddict-item]
# add it back in to the parameter_configs_map
parameter_configs_map[param_interpretation['name']] = param_obj
result['config']['parameters'].extend(parameter_configs_map.values())
@@ -349,8 +397,8 @@ def process_measurements(toplvl, other_parameter_configs=None):
return results
-def dedupe_parameters(parameters):
- duplicates = {}
+def dedupe_parameters(parameters: Sequence[Parameter]) -> list[Parameter]:
+ duplicates: MutableMapping[str, MutableSequence[Parameter]] = {}
for p in parameters:
duplicates.setdefault(p['name'], []).append(p)
for parname in duplicates:
@@ -373,14 +421,14 @@ def parse(
mounts: MountPathType | None = None,
track_progress: bool = False,
validation_as_error: bool = True,
-):
+) -> Workspace:
"""
Parse the ``configfile`` with respect to the ``rootdir``.
Args:
configfile (:class:`pathlib.Path` or :obj:`str` or file object): The top-level XML config file to parse.
rootdir (:class:`pathlib.Path` or :obj:`str`): The path to the working directory for interpreting relative paths in the configuration.
- mounts (:obj:`None` or :obj:`list` of 2-:obj:`tuple` of :class:`pathlib.Path` objects): The first field is the local path to where files are located, the second field is the path where the file or directory are saved in the XML configuration. This is similar in spirit to Docker volume mounts. Default is ``None``.
+ mounts (:obj:`None` or :obj:`list` of 2-:obj:`tuple` of :class:`os.PathLike` objects): The first field is the local path to where files are located, the second field is the path where the file or directory are saved in the XML configuration. This is similar in spirit to Docker volume mounts. Default is ``None``.
track_progress (:obj:`bool`): Show the progress bar. Default is to hide the progress bar.
validation_as_error (:obj:`bool`): Throw an exception (``True``) or print a warning (``False``) if the resulting HistFactory JSON does not adhere to the schema. Default is to throw an exception.
@@ -398,30 +446,27 @@ def parse(
# create a resolver for finding files
resolver = resolver_factory(Path(rootdir), mounts)
- channels = {}
+ channels: MutableSequence[Channel] = []
+ observations: MutableSequence[Observation] = []
parameter_configs = []
for inp in inputs:
inputs.set_description(f'Processing {inp}')
channel, data, samples, channel_parameter_configs = process_channel(
ET.parse(resolver(inp)), resolver, track_progress
)
- channels[channel] = {'data': data, 'samples': samples}
+ channels.append({'name': channel, 'samples': samples})
+ observations.append({'name': channel, 'data': data})
parameter_configs.extend(channel_parameter_configs)
parameter_configs = dedupe_parameters(parameter_configs)
- result = {
- 'measurements': process_measurements(
- toplvl, other_parameter_configs=parameter_configs
- ),
- 'channels': [
- {'name': channel_name, 'samples': channel_spec['samples']}
- for channel_name, channel_spec in channels.items()
- ],
- 'observations': [
- {'name': channel_name, 'data': channel_spec['data']}
- for channel_name, channel_spec in channels.items()
- ],
- 'version': schema.version,
+ measurements = process_measurements(
+ toplvl, other_parameter_configs=parameter_configs
+ )
+ result: Workspace = {
+ 'measurements': measurements,
+ 'channels': channels,
+ 'observations': observations,
+ 'version': schema.version, # type: ignore[typeddict-item]
}
try:
schema.validate(result, 'workspace.json')
@@ -433,6 +478,6 @@ def parse(
return result
-def clear_filecache():
+def clear_filecache() -> None:
global __FILECACHE__
__FILECACHE__ = {}
diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py
--- a/src/pyhf/schema/validator.py
+++ b/src/pyhf/schema/validator.py
@@ -2,10 +2,10 @@
import pyhf.exceptions
from pyhf.schema.loader import load_schema
from pyhf.schema import variables
-from typing import Union
+from typing import Union, Mapping
-def validate(spec: dict, schema_name: str, version: Union[str, None] = None):
+def validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):
"""
Validate a provided specification against a schema.
diff --git a/src/pyhf/typing.py b/src/pyhf/typing.py
new file mode 100644
--- /dev/null
+++ b/src/pyhf/typing.py
@@ -0,0 +1,136 @@
+import os
+import sys
+from typing import TYPE_CHECKING, MutableSequence, Sequence, Union
+
+if sys.version_info >= (3, 8):
+ from typing import Literal, TypedDict
+else:
+ from typing_extensions import Literal, TypedDict
+
+__all__ = (
+ "PathOrStr",
+ "ParameterBase",
+ "Parameter",
+ "Measurement",
+ "ModifierBase",
+ "NormSys",
+ "NormFactor",
+ "HistoSys",
+ "StatError",
+ "ShapeSys",
+ "ShapeFactor",
+ "LumiSys",
+ "Modifier",
+ "Sample",
+ "Channel",
+ "Observation",
+ "Workspace",
+)
+
+if TYPE_CHECKING:
+ PathOrStr = Union[str, os.PathLike[str]]
+else:
+ PathOrStr = Union[str, "os.PathLike[str]"]
+
+
+class ParameterBase(TypedDict, total=False):
+ auxdata: Sequence[float]
+ bounds: Sequence[Sequence[float]]
+ inits: Sequence[float]
+ sigmas: Sequence[float]
+ fixed: bool
+
+
+class Parameter(ParameterBase):
+ name: str
+
+
+class Config(TypedDict):
+ poi: str
+ parameters: MutableSequence[Parameter]
+
+
+class Measurement(TypedDict):
+ name: str
+ config: Config
+
+
+class ModifierBase(TypedDict):
+ name: str
+
+
+class NormSysData(TypedDict):
+ lo: float
+ hi: float
+
+
+class NormSys(ModifierBase):
+ type: Literal['normsys']
+ data: NormSysData
+
+
+class NormFactor(ModifierBase):
+ type: Literal['normfactor']
+ data: None
+
+
+class HistoSysData(TypedDict):
+ lo_data: Sequence[float]
+ hi_data: Sequence[float]
+
+
+class HistoSys(ModifierBase):
+ type: Literal['histosys']
+ data: HistoSysData
+
+
+class StatError(ModifierBase):
+ type: Literal['staterror']
+ data: Sequence[float]
+
+
+class ShapeSys(ModifierBase):
+ type: Literal['shapesys']
+ data: Sequence[float]
+
+
+class ShapeFactor(ModifierBase):
+ type: Literal['shapefactor']
+ data: None
+
+
+class LumiSys(TypedDict):
+ name: Literal['lumi']
+ type: Literal['lumi']
+ data: None
+
+
+Modifier = Union[
+ NormSys, NormFactor, HistoSys, StatError, ShapeSys, ShapeFactor, LumiSys
+]
+
+
+class SampleBase(TypedDict, total=False):
+ parameter_configs: Sequence[Parameter]
+
+
+class Sample(SampleBase):
+ name: str
+ data: Sequence[float]
+ modifiers: Sequence[Modifier]
+
+
+class Channel(TypedDict):
+ name: str
+ samples: Sequence[Sample]
+
+
+class Observation(TypedDict):
+ name: str
+ data: Sequence[float]
+
+
+class Workspace(TypedDict):
+ measurements: Sequence[Measurement]
+ channels: Sequence[Channel]
+ observations: Sequence[Observation]
| diff --git a/tests/test_import.py b/tests/test_import.py
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -506,7 +506,7 @@ def test_import_missingPOI(mocker, datadir):
)
-def test_import_resolver(mocker):
+def test_import_resolver():
rootdir = Path('/current/working/dir')
mounts = [(Path('/this/path/changed'), Path('/my/abs/path'))]
resolver = pyhf.readxml.resolver_factory(rootdir, mounts)
| Add mypy to pre-commit checks
...Use mypy in pre-commit and turn on "check untyped functions", and do just enough typing to get that up and running. (Again, not something to rush for a release).
_Originally posted by @henryiii in https://github.com/scikit-hep/pyhf/issues/1272#issuecomment-772064533_
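For context, the typing work shown in the patch above introduces `pyhf.typing`; a minimal sketch (assuming a pyhf version that ships `pyhf.typing`) of annotating a specification fragment so that mypy has something to check:
```python
from pyhf.typing import Channel, Sample

# plain dicts at runtime; the TypedDict annotations are what mypy verifies
sample: Sample = {
    "name": "signal",
    "data": [12.0, 11.0],
    "modifiers": [{"name": "mu", "type": "normfactor", "data": None}],
}
channel: Channel = {"name": "singlechannel", "samples": [sample]}
print(channel["name"], len(channel["samples"]))
```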
| Related to Issue #1284 and PR #948
@lukasheinrich @kratsg Let's move `mypy` work to `v0.7.0` to speed up the release of `v0.6.2`, unless you have strong objections. | 2022-08-11T02:35:40 |
scikit-hep/pyhf | 1,942 | scikit-hep__pyhf-1942 | [
"1941"
] | 6f8d87e65e2b9b6033974ef18cbb4d9bf62f3dd8 | diff --git a/src/pyhf/typing.py b/src/pyhf/typing.py
--- a/src/pyhf/typing.py
+++ b/src/pyhf/typing.py
@@ -1,6 +1,6 @@
import os
import sys
-from typing import TYPE_CHECKING, MutableSequence, Sequence, Union
+from typing import MutableSequence, Sequence, Union
if sys.version_info >= (3, 8):
from typing import Literal, TypedDict
@@ -27,10 +27,8 @@
"Workspace",
)
-if TYPE_CHECKING:
- PathOrStr = Union[str, os.PathLike[str]]
-else:
- PathOrStr = Union[str, "os.PathLike[str]"]
+# TODO: Switch to os.PathLike[str] once Python 3.8 support dropped
+PathOrStr = Union[str, "os.PathLike[str]"]
class ParameterBase(TypedDict, total=False):
| Use string form for type checking until Python 3.7 dropped
What's the `if` here? The string form is fine until you drop Pythons that don't support it.
_Originally posted by @henryiii in https://github.com/scikit-hep/pyhf/pull/1909#discussion_r944456765_
This was in reference to
```python
if T.TYPE_CHECKING:
PathOrStr = T.Union[str, os.PathLike[str]]
else:
PathOrStr = T.Union[str, "os.PathLike[str]"]
```
in PR #190 now in
https://github.com/scikit-hep/pyhf/blob/ad1dd86f1d7c1bcbf737805b6821e07c4ef75fca/src/pyhf/typing.py#L30-L33
So until Python 3.7 is dropped (I think that is the relevant version; I am not very up to date with my type checking knowledge) we could instead just drop the `if TYPE_CHECKING` and use
```python
#TODO: Switch to os.PathLike[str] once Python 3.7 dropped
PathOrStr = Union[str, "os.PathLike[str]"]
```
This would also allow for reverting PR #1937.
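For illustration, a small sketch (not part of the original discussion) of why the quoted form alone suffices: the quoted member is stored as a forward reference and never evaluated at import time, so the module also imports on interpreters where subscripting `os.PathLike` would raise at runtime:
```python
import os
from typing import Union

# the quoted member becomes a ForwardRef, so nothing is subscripted on import
PathOrStr = Union[str, "os.PathLike[str]"]

def resolve(path: PathOrStr) -> str:
    """Return the given path-like object as a plain string."""
    return os.fspath(path)

print(resolve("data/workspace.json"))
```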
| 2022-08-12T17:06:14 |
||
scikit-hep/pyhf | 1,953 | scikit-hep__pyhf-1953 | [
"1952"
] | fd589efb50fe249df3079a276ca86d32e5a9305a | diff --git a/src/pyhf/workspace.py b/src/pyhf/workspace.py
--- a/src/pyhf/workspace.py
+++ b/src/pyhf/workspace.py
@@ -300,14 +300,15 @@ def __init__(self, spec, validate: bool = True, **config_kwargs):
"""
spec = copy.deepcopy(spec)
- super().__init__(spec, channels=spec['channels'])
self.schema = config_kwargs.pop('schema', 'workspace.json')
self.version = config_kwargs.pop('version', spec.get('version', None))
# run jsonschema validation of input specification against the (provided) schema
if validate:
log.info(f"Validating spec against schema: {self.schema}")
- schema.validate(self, self.schema, version=self.version)
+ schema.validate(spec, self.schema, version=self.version)
+
+ super().__init__(spec, channels=spec['channels'])
self.measurement_names = []
for measurement in self.get('measurements', []):
| diff --git a/tests/test_workspace.py b/tests/test_workspace.py
--- a/tests/test_workspace.py
+++ b/tests/test_workspace.py
@@ -910,3 +910,11 @@ def test_workspace_without_validation(mocker, simplemodels_model_data):
pyhf.Workspace(dict(ws), validate=False)
assert pyhf.schema.validate.called is False
+
+
+def test_workspace_invalid_specification():
+ spec = {"channels": [{"name": "SR", "samples_wrong_name": []}]}
+ # Ensure that an invalid specifications gets caught as such
+ # before a KeyError: 'samples' could be reached.
+ with pytest.raises(pyhf.exceptions.InvalidSpecification):
+ pyhf.Workspace(spec)
| Model specification is accessed before being validated
### Summary
The model specification is currently accessed before validation is done:
https://github.com/scikit-hep/pyhf/blob/a6186405ae5acee472133874e9a477dc41def7df/src/pyhf/mixins.py#L26-L37
called via https://github.com/scikit-hep/pyhf/blob/a6186405ae5acee472133874e9a477dc41def7df/src/pyhf/workspace.py#L303-L310
(validation happens at the bottom).
If there is no strong reason for the order being like this, I think it would be useful to run the validation first to filter out problematic specifications.
I ran into this when building a multi-channel workspace from a single-channel workspace for debugging purposes, and ended up pasting the channel at the wrong level of indentation.
### OS / Environment
```console
n/a
```
### Steps to Reproduce
<!--- Paste your minimal failing Python example code between the quotes below -->
```python (paste below)
import pyhf
spec = {
"channels": [
{
"name": "SR",
"samples_wrong_name": []
}
]
}
pyhf.Workspace(spec)
```
### File Upload (optional)
_No response_
### Expected Results
`pyhf.exceptions.InvalidSpecification` or similar raised via the schema validation
### Actual Results
```console
Traceback (most recent call last):
File "[...]/test.py", line 12, in <module>
pyhf.Workspace(spec)
File "[...]/pyhf/src/pyhf/workspace.py", line 303, in __init__
super().__init__(spec, channels=spec['channels'])
File "[...]/pyhf/src/pyhf/mixins.py", line 28, in __init__
self.channel_nbins[channel['name']] = len(channel['samples'][0]['data'])
KeyError: 'samples'
```
### pyhf Version
```console
pyhf, version 0.7.0rc2.dev18
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| I tried out this change to run validation first, which seems to pass relevant tests (I see some unrelated failures in e.g. `test_asymptotic_calculator_has_fitted_pars` where the tolerance may need to be increased). Happy to submit this as a PR if you think this makes sense.
```diff
diff --git a/src/pyhf/workspace.py b/src/pyhf/workspace.py
index 6339f71c..070f3966 100644
--- a/src/pyhf/workspace.py
+++ b/src/pyhf/workspace.py
@@ -300,14 +300,15 @@ def __init__(self, spec, validate: bool = True, **config_kwargs):
"""
spec = copy.deepcopy(spec)
- super().__init__(spec, channels=spec['channels'])
self.schema = config_kwargs.pop('schema', 'workspace.json')
self.version = config_kwargs.pop('version', spec.get('version', None))
# run jsonschema validation of input specification against the (provided) schema
if validate:
log.info(f"Validating spec against schema: {self.schema}")
- schema.validate(self, self.schema, version=self.version)
+ schema.validate(spec, self.schema, version=self.version)
+
+ super().__init__(spec, channels=spec['channels'])
self.measurement_names = []
for measurement in self.get('measurements', []):
```
> Happy to submit this as a PR if you think this makes sense.
Please do. :rocket: It would be great if you could also add a test that has the
> `pyhf.exceptions.InvalidSpecification` or similar raised via the schema validation
that you mentioned.
Also thanks for this great Issue! | 2022-08-18T18:36:32 |
scikit-hep/pyhf | 1,965 | scikit-hep__pyhf-1965 | [
"1945"
] | d4e1fc20b01000351405a8cd717a1544bb5c0f27 | diff --git a/src/pyhf/modifiers/staterror.py b/src/pyhf/modifiers/staterror.py
--- a/src/pyhf/modifiers/staterror.py
+++ b/src/pyhf/modifiers/staterror.py
@@ -100,8 +100,12 @@ def finalize(self):
],
axis=0,
)
+ # here relerrs still has all the bins, while the staterror are usually per-channel
+ # so we need to pick out the masks for this modifier to extract the
+ # modifier configuration (sigmas, etc..)
+ # so loop over samples and extract the first mask
+ # while making sure any subsequent mask is consistent
relerrs = default_backend.sqrt(relerrs)
-
masks = {}
for modifier_data in self.builder_data[modname].values():
mask_this_sample = default_backend.astensor(
@@ -113,12 +117,14 @@ def finalize(self):
else:
assert (mask_this_sample == masks[modname]).all()
- for modifier_data in self.builder_data[modname].values():
- modifier_data['data']['mask'] = masks[modname]
+ # extract sigmas using this modifiers mask
sigmas = relerrs[masks[modname]]
+
# list of bools, consistent with other modifiers (no numpy.bool_)
fixed = default_backend.tolist(sigmas == 0)
- # ensures non-Nan constraint term, but in a future PR we need to remove constraints for these
+ # FIXME: sigmas that are zero will be fixed to 1.0 arbitrarily to ensure
+ # non-Nan constraint term, but in a future PR need to remove constraints
+ # for these
sigmas[fixed] = 1.0
self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])
return self.builder_data
@@ -145,18 +151,6 @@ def __init__(self, modifiers, pdfconfig, builder_data, batch_size=None):
[[builder_data[m][s]['data']['mask']] for s in pdfconfig.samples]
for m in keys
]
- self.__staterror_uncrt = default_backend.astensor(
- [
- [
- [
- builder_data[m][s]['data']['uncrt'],
- builder_data[m][s]['data']['nom_data'],
- ]
- for s in pdfconfig.samples
- ]
- for m in keys
- ]
- )
global_concatenated_bin_indices = [
[[j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]]
]
| diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
--- a/tests/test_modifiers.py
+++ b/tests/test_modifiers.py
@@ -193,3 +193,55 @@ def test_invalid_bin_wise_modifier(datadir, patch_file):
with pytest.raises(pyhf.exceptions.InvalidModifier):
pyhf.Model(bad_spec)
+
+
+def test_issue1720_staterror_builder_mask(datadir):
+ with open(datadir.joinpath("issue1720_greedy_staterror.json")) as spec_file:
+ spec = json.load(spec_file)
+
+ spec["channels"][0]["samples"][1]["modifiers"][0]["type"] = "staterror"
+ config = pyhf.pdf._ModelConfig(spec)
+ builder = pyhf.modifiers.staterror.staterror_builder(config)
+
+ channel = spec["channels"][0]
+ sig_sample = channel["samples"][0]
+ bkg_sample = channel["samples"][1]
+ modifier = bkg_sample["modifiers"][0]
+
+ assert channel["name"] == "channel"
+ assert sig_sample["name"] == "signal"
+ assert bkg_sample["name"] == "bkg"
+ assert modifier["type"] == "staterror"
+
+ builder.append("staterror/NP", "channel", "bkg", modifier, bkg_sample)
+ collected_bkg = builder.collect(modifier, bkg_sample["data"])
+ assert collected_bkg == {"mask": [True], "nom_data": [1], "uncrt": [1.5]}
+
+ builder.append("staterror/NP", "channel", "signal", None, sig_sample)
+ collected_sig = builder.collect(None, sig_sample["data"])
+ assert collected_sig == {"mask": [False], "nom_data": [5], "uncrt": [0.0]}
+
+ finalized = builder.finalize()
+ assert finalized["staterror/NP"]["bkg"]["data"]["mask"].tolist() == [True]
+ assert finalized["staterror/NP"]["signal"]["data"]["mask"].tolist() == [False]
+
+
[email protected](
+ "inits",
+ [[-2.0], [-1.0], [0.0], [1.0], [2.0]],
+)
+def test_issue1720_greedy_staterror(datadir, inits):
+ """
+ Test that the staterror does not affect more samples than shapesys equivalently.
+ """
+ with open(datadir.joinpath("issue1720_greedy_staterror.json")) as spec_file:
+ spec = json.load(spec_file)
+
+ model_shapesys = pyhf.Workspace(spec).model()
+ expected_shapesys = model_shapesys.expected_actualdata(inits)
+
+ spec["channels"][0]["samples"][1]["modifiers"][0]["type"] = "staterror"
+ model_staterror = pyhf.Workspace(spec).model()
+ expected_staterror = model_staterror.expected_actualdata(inits)
+
+ assert expected_staterror == expected_shapesys
diff --git a/tests/test_modifiers/issue1720_greedy_staterror.json b/tests/test_modifiers/issue1720_greedy_staterror.json
new file mode 100644
--- /dev/null
+++ b/tests/test_modifiers/issue1720_greedy_staterror.json
@@ -0,0 +1,49 @@
+{
+ "channels": [
+ {
+ "name": "channel",
+ "samples": [
+ {
+ "data": [
+ 5
+ ],
+ "modifiers": [],
+ "name": "signal"
+ },
+ {
+ "data": [
+ 1
+ ],
+ "modifiers": [
+ {
+ "data": [
+ 1.5
+ ],
+ "name": "NP",
+ "type": "shapesys"
+ }
+ ],
+ "name": "bkg"
+ }
+ ]
+ }
+ ],
+ "measurements": [
+ {
+ "config": {
+ "parameters": [],
+ "poi": "NP"
+ },
+ "name": ""
+ }
+ ],
+ "observations": [
+ {
+ "data": [
+ 0
+ ],
+ "name": "channel"
+ }
+ ],
+ "version": "1.0.0"
+}
diff --git a/tests/test_scripts.py b/tests/test_scripts.py
--- a/tests/test_scripts.py
+++ b/tests/test_scripts.py
@@ -297,7 +297,7 @@ def test_testpoi(tmpdir, script_runner):
command = f'pyhf xml2json validation/xmlimport_input/config/example.xml --basedir validation/xmlimport_input/ --output-file {temp.strpath:s}'
ret = script_runner.run(*shlex.split(command))
- pois = [1.0, 0.5, 0.0]
+ pois = [1.0, 0.5, 0.001]
results_exp = []
results_obs = []
for test_poi in pois:
| staterror regressions blocking v0.7.0
Before `v0.7.0rc2` can go out and then `v0.7.0` we have to fix the regressions in `staterror`. This Issue is just to keep track of them and help us squash them all.
* [ ] Issue #1720
* [x] Issue #1944
| 2022-08-27T06:45:57 |
|
scikit-hep/pyhf | 1,969 | scikit-hep__pyhf-1969 | [
"1964"
] | 62aad7162edc0ded36b980404833db085d47de9a | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -141,6 +141,36 @@ def setup(app):
'tensorflow_probability',
]
+
+_type_aliases_inverted = {
+ 'pyhf.typing': [
+ 'PathOrStr',
+ 'ParameterBase',
+ 'Parameter',
+ 'Measurement',
+ 'ModifierBase',
+ 'NormSys',
+ 'NormFactor',
+ 'HistoSys',
+ 'StatError',
+ 'ShapeSys',
+ 'ShapeFactor',
+ 'LumiSys',
+ 'Modifier',
+ 'Sample',
+ 'Channel',
+ 'Observation',
+ 'Workspace',
+ 'Literal',
+ ],
+ 'numpy.typing': ['ArrayLike', 'DTypeLike', 'NBitBase', 'NDArray'],
+}
+autodoc_type_aliases = {
+ item: f'{k}.{item}' for k, v in _type_aliases_inverted.items() for item in v
+}
+
+autodoc_typehints_format = 'fully-qualified'
+
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
diff --git a/src/pyhf/tensor/numpy_backend.py b/src/pyhf/tensor/numpy_backend.py
--- a/src/pyhf/tensor/numpy_backend.py
+++ b/src/pyhf/tensor/numpy_backend.py
@@ -15,7 +15,7 @@
T = TypeVar("T", bound=NBitBase)
Tensor = Union["NDArray[np.number[T]]", "NDArray[np.bool_]"]
-
+FloatIntOrBool = Literal["float", "int", "bool"]
log = logging.getLogger(__name__)
@@ -53,7 +53,7 @@ def __init__(self, **kwargs: dict[str, str]):
self.name = 'numpy'
self.precision = kwargs.get('precision', '64b')
self.dtypemap: Mapping[
- Literal['float', 'int', 'bool'],
+ FloatIntOrBool,
DTypeLike, # Type[np.floating[T]] | Type[np.integer[T]] | Type[np.bool_],
] = {
'float': np.float64 if self.precision == '64b' else np.float32,
@@ -206,7 +206,7 @@ def isfinite(self, tensor: Tensor[T]) -> NDArray[np.bool_]:
return np.isfinite(tensor)
def astensor(
- self, tensor_in: ArrayLike, dtype: Literal['float'] = 'float'
+ self, tensor_in: ArrayLike, dtype: FloatIntOrBool = 'float'
) -> ArrayLike:
"""
Convert to a NumPy array.
@@ -247,9 +247,7 @@ def product(self, tensor_in: Tensor[T], axis: Shape | None = None) -> ArrayLike:
def abs(self, tensor: Tensor[T]) -> ArrayLike:
return np.abs(tensor)
- def ones(
- self, shape: Shape, dtype: Literal["float", "int", "bool"] = "float"
- ) -> ArrayLike:
+ def ones(self, shape: Shape, dtype: FloatIntOrBool = "float") -> ArrayLike:
try:
dtype_obj = self.dtypemap[dtype]
except KeyError:
@@ -261,9 +259,7 @@ def ones(
return np.ones(shape, dtype=dtype_obj)
- def zeros(
- self, shape: Shape, dtype: Literal["float", "int", "bool"] = "float"
- ) -> ArrayLike:
+ def zeros(self, shape: Shape, dtype: FloatIntOrBool = "float") -> ArrayLike:
try:
dtype_obj = self.dtypemap[dtype]
except KeyError:
| Improve typing rendering in docs
### Summary
As more of Issue #1284 is addressed (e.g. PR #1940) the rendering of the APIs in the reference docs has the unfortunate side effect of becoming increasingly unreadable and complicated.
As [`autodoc_type_aliases`](https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases) exists in Sphinx, it could be a way to restore the clean and simple API reference that has previously been presented.
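For illustration, a minimal sketch of the Sphinx options involved (the alias entries here are placeholders rather than the final mapping adopted in `docs/conf.py`):
```python
# docs/conf.py (sketch)
autodoc_type_aliases = {
    "ArrayLike": "numpy.typing.ArrayLike",
    "PathOrStr": "pyhf.typing.PathOrStr",
}
autodoc_typehints_format = "fully-qualified"
```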
### Documentation Page Link
https://scikit-hep.org/pyhf/_generated/pyhf.tensor.numpy_backend.numpy_backend.html
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2022-08-29T04:59:35 |
||
scikit-hep/pyhf | 1,970 | scikit-hep__pyhf-1970 | [
"1335"
] | c68e43b394652e11d8ebf577fa9705e74191d69a | diff --git a/src/pyhf/infer/calculators.py b/src/pyhf/infer/calculators.py
--- a/src/pyhf/infer/calculators.py
+++ b/src/pyhf/infer/calculators.py
@@ -688,12 +688,14 @@ def __init__(
value during minimization.
test_stat (:obj:`str`): The test statistic to use as a numerical summary of the
data: ``'qtilde'``, ``'q'``, or ``'q0'``.
- ``'qtilde'`` (default) performs the calculation using the alternative test statistic,
- :math:`\tilde{q}_{\mu}`, as defined under the Wald approximation in Equation (62)
- of :xref:`arXiv:1007.1727` (:func:`~pyhf.infer.test_statistics.qmu_tilde`), ``'q'``
- performs the calculation using the test statistic :math:`q_{\mu}`
- (:func:`~pyhf.infer.test_statistics.qmu`), and ``'q0'`` performs the calculation using
- the discovery test statistic :math:`q_{0}` (:func:`~pyhf.infer.test_statistics.q0`).
+
+ * ``'qtilde'``: (default) performs the calculation using the alternative test statistic,
+ :math:`\tilde{q}_{\mu}`, as defined under the Wald approximation in Equation (62)
+ of :xref:`arXiv:1007.1727` (:func:`~pyhf.infer.test_statistics.qmu_tilde`).
+ * ``'q'``: performs the calculation using the test statistic :math:`q_{\mu}`
+ (:func:`~pyhf.infer.test_statistics.qmu`).
+ * ``'q0'``: performs the calculation using the discovery test statistic
+ :math:`q_{0}` (:func:`~pyhf.infer.test_statistics.q0`).
ntoys (:obj:`int`): Number of toys to use (how many times to sample the underlying distributions).
track_progress (:obj:`bool`): Whether to display the `tqdm` progress bar or not (outputs to `stderr`).
| Harmonize docstring for test_stat between AsymptoticCalculator and ToyCalculator
# Description
In `v0.6.0` docs, the `AsymptoticCalculator` API has a [nicely formatted docstring for the `test_stat` arg](https://pyhf.readthedocs.io/en/v0.6.0/_generated/pyhf.infer.calculators.AsymptoticCalculator.html)
https://github.com/scikit-hep/pyhf/blob/8de7566a2b6dc4a5278e0b0da9c0a45a2e5a752d/src/pyhf/infer/calculators.py#L199-L208
but the [`ToyCalculator`](https://pyhf.readthedocs.io/en/v0.6.0/_generated/pyhf.infer.calculators.ToyCalculator.html) API didn't have these changes applied to it (it seems @matthewfeickert forgot to propagate these in PR #993)
https://github.com/scikit-hep/pyhf/blob/8de7566a2b6dc4a5278e0b0da9c0a45a2e5a752d/src/pyhf/infer/calculators.py#L625-L632
The `ToyCalculator` docstring should be updated to match.
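For reference, a minimal sketch of where the `test_stat` option enters for the toy-based route (model and values are just illustrative):
```python
import pyhf

model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
data = [51, 48] + model.config.auxdata
toy_calc = pyhf.infer.calculators.ToyCalculator(
    data, model, test_stat="qtilde", ntoys=100
)
```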
| Similarly, need to update the warning for `qmu` to be using `qmu_tilde` with the more appropriate suggestion.
@matthewfeickert can I take this up? @kratsg could you please elaborate a bit more on the warning updates?
@matthewfeickert @kratsg not to disturb you again, but any updates on this?
The warning updates were already done. As for documentation, why not just submit the PR? But unless you know the statistics content, we'd recommend not just rewriting without knowing what the content itself is.
> But unless you know the statistics content, we'd recommend not just rewriting without knowing what the content itself is.
Yeah, I was envisioning a small rewrite of this when I opened the Issue. My advice would be to only work on a PR for this if you feel comfortable with the frequentist statistics being discussed, else we'll probably have to request multiple revisions.
Oh oh, my bad, I thought this required only formatting of the documentation. Thanks for letting me know! | 2022-08-30T18:42:14 |
|
scikit-hep/pyhf | 1,972 | scikit-hep__pyhf-1972 | [
"1982"
] | b6e02eebe7489d40c7fb71cc0f971aa19b7c94e1 | diff --git a/src/pyhf/mixins.py b/src/pyhf/mixins.py
--- a/src/pyhf/mixins.py
+++ b/src/pyhf/mixins.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
import logging
+from typing import Any, Sequence
+
+from pyhf.typing import Channel
log = logging.getLogger(__name__)
@@ -13,39 +18,74 @@ class _ChannelSummaryMixin:
**channels: A list of channels to provide summary information about. Follows the `defs.json#/definitions/channel` schema.
"""
- def __init__(self, *args, **kwargs):
+ def __init__(self, *args: Any, **kwargs: Sequence[Channel]):
channels = kwargs.pop('channels')
super().__init__(*args, **kwargs)
- self.channels = []
- self.samples = []
- self.modifiers = []
+ self._channels: list[str] = []
+ self._samples: list[str] = []
+ self._modifiers: list[tuple[str, str]] = []
# keep track of the width of each channel (how many bins)
- self.channel_nbins = {}
+ self._channel_nbins: dict[str, int] = {}
# need to keep track in which order we added the constraints
# so that we can generate correctly-ordered data
for channel in channels:
- self.channels.append(channel['name'])
- self.channel_nbins[channel['name']] = len(channel['samples'][0]['data'])
+ self._channels.append(channel['name'])
+ self._channel_nbins[channel['name']] = len(channel['samples'][0]['data'])
for sample in channel['samples']:
- self.samples.append(sample['name'])
+ self._samples.append(sample['name'])
for modifier_def in sample['modifiers']:
- self.modifiers.append(
+ self._modifiers.append(
(
modifier_def['name'], # mod name
modifier_def['type'], # mod type
)
)
- self.channels = sorted(list(set(self.channels)))
- self.samples = sorted(list(set(self.samples)))
- self.modifiers = sorted(list(set(self.modifiers)))
- self.channel_nbins = {
- channel: self.channel_nbins[channel] for channel in self.channels
+ self._channels = sorted(list(set(self._channels)))
+ self._samples = sorted(list(set(self._samples)))
+ self._modifiers = sorted(list(set(self._modifiers)))
+ self._channel_nbins = {
+ channel: self._channel_nbins[channel] for channel in self._channels
}
- self.channel_slices = {}
+ self._channel_slices = {}
begin = 0
- for c in self.channels:
- end = begin + self.channel_nbins[c]
- self.channel_slices[c] = slice(begin, end)
+ for c in self._channels:
+ end = begin + self._channel_nbins[c]
+ self._channel_slices[c] = slice(begin, end)
begin = end
+
+ @property
+ def channels(self) -> list[str]:
+ """
+ Ordered list of channel names in the model.
+ """
+ return self._channels
+
+ @property
+ def samples(self) -> list[str]:
+ """
+ Ordered list of sample names in the model.
+ """
+ return self._samples
+
+ @property
+ def modifiers(self) -> list[tuple[str, str]]:
+ """
+ Ordered list of pairs of modifier name/type in the model.
+ """
+ return self._modifiers
+
+ @property
+ def channel_nbins(self) -> dict[str, int]:
+ """
+ Dictionary mapping channel name to number of bins in the channel.
+ """
+ return self._channel_nbins
+
+ @property
+ def channel_slices(self) -> dict[str, slice]:
+ """
+ Dictionary mapping channel name to the bin slices in the model.
+ """
+ return self._channel_slices
diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -220,13 +220,58 @@ def __init__(self, spec, **config_kwargs):
f"Unsupported options were passed in: {list(config_kwargs.keys())}."
)
+ # prefixed with underscore are documented via @property
+ self._par_order = []
+ self._poi_name = None
+ self._poi_index = None
+ self._nmaindata = sum(self.channel_nbins.values())
+ self._auxdata = []
+
+ # these are not documented properties
self.par_map = {}
- self.par_order = []
- self.poi_name = None
- self.poi_index = None
- self.auxdata = []
self.auxdata_order = []
- self.nmaindata = sum(self.channel_nbins.values())
+
+ @property
+ def par_order(self):
+ """
+ Return an ordered list of paramset names in the model.
+ """
+ return self._par_order
+
+ @property
+ def poi_name(self):
+ """
+ Return the name of the POI parameter in the model.
+ """
+ return self._poi_name
+
+ @property
+ def poi_index(self):
+ """
+ Return the index of the POI parameter in the model.
+ """
+ return self._poi_index
+
+ @property
+ def auxdata(self):
+ """
+ Return the auxiliary data in the model.
+ """
+ return self._auxdata
+
+ @property
+ def nmaindata(self):
+ """
+ Return the length of data in the main model.
+ """
+ return self._nmaindata
+
+ @property
+ def nauxdata(self):
+ """
+ Return the length of data in the constraint model.
+ """
+ return len(self._auxdata)
def set_parameters(self, _required_paramsets):
"""
@@ -240,9 +285,8 @@ def set_auxinfo(self, auxdata, auxdata_order):
"""
Sets a group of configuration data for the constraint terms.
"""
- self.auxdata = auxdata
+ self._auxdata = auxdata
self.auxdata_order = auxdata_order
- self.nauxdata = len(self.auxdata)
def suggested_init(self):
"""
@@ -400,8 +444,8 @@ def set_poi(self, name):
)
s = self.par_slice(name)
assert s.stop - s.start == 1
- self.poi_name = name
- self.poi_index = s.start
+ self._poi_name = name
+ self._poi_index = s.start
def _create_and_register_paramsets(self, required_paramsets):
next_index = 0
@@ -415,7 +459,7 @@ def _create_and_register_paramsets(self, required_paramsets):
sl = slice(next_index, next_index + paramset.n_parameters)
next_index = next_index + paramset.n_parameters
- self.par_order.append(param_name)
+ self._par_order.append(param_name)
self.par_map[param_name] = {'slice': sl, 'paramset': paramset}
@@ -700,7 +744,7 @@ def __init__(
schema.validate(self.spec, self.schema, version=self.version)
# build up our representation of the specification
poi_name = config_kwargs.pop('poi_name', 'mu')
- self.config = _ModelConfig(self.spec, **config_kwargs)
+ self._config = _ModelConfig(self.spec, **config_kwargs)
modifiers, _nominal_rates = _nominal_and_modifiers_from_spec(
modifier_set, self.config, self.spec, self.batch_size
@@ -733,6 +777,13 @@ def __init__(
sizes, ['main', 'aux'], self.batch_size
)
+ @property
+ def config(self):
+ """
+ The :class:`_ModelConfig` instance for the model.
+ """
+ return self._config
+
def expected_auxdata(self, pars):
"""
Compute the expected value of the auxiliary measurements.
| diff --git a/tests/test_workspace.py b/tests/test_workspace.py
--- a/tests/test_workspace.py
+++ b/tests/test_workspace.py
@@ -170,15 +170,20 @@ def test_get_workspace_data(workspace_factory, include_auxdata):
assert w.data(m, include_auxdata=include_auxdata)
-def test_get_workspace_data_bad_model(workspace_factory, caplog):
+def test_get_workspace_data_bad_model(workspace_factory, caplog, mocker):
w = workspace_factory()
m = w.model()
# the iconic fragrance of an expected failure
- m.config.channels = [c.replace('channel', 'chanel') for c in m.config.channels]
+
+ mocker.patch(
+ "pyhf.mixins._ChannelSummaryMixin.channels",
+ new_callable=mocker.PropertyMock,
+ return_value=["fakechannel"],
+ )
with caplog.at_level(logging.INFO, 'pyhf.pdf'):
with pytest.raises(KeyError):
assert w.data(m)
- assert 'Invalid channel' in caplog.text
+ assert "Invalid channel" in caplog.text
def test_json_serializable(workspace_factory):
| Include `model.config` property in the `pyhf.pdf.Model` API documentation
I would also suggest somehow pointing to `_ModelConfig` from the `pyhf.pdf.Model` page. The `.config` API is not listed there, and it is not particularly intuitive for a user to look at `_ModelConfig` to find out about that `.config` attribute, where it says
> `_ModelConfig` should not be called directly. It should instead by accessed through the `config` attribute of `Model`
Given that `_ModelConfig` implies internal API, I wonder whether all of this page should just live within the `Model` API documentation page? This is where I would expect a user to look for it.
_Originally posted by @alexander-held in https://github.com/scikit-hep/pyhf/pull/1972#pullrequestreview-1095476721_
| 2022-08-30T19:09:13 |
|
scikit-hep/pyhf | 1,976 | scikit-hep__pyhf-1976 | [
"1975"
] | 9842e824712fcf02d42e49aa07016ea40139f70c | diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py
--- a/src/pyhf/schema/validator.py
+++ b/src/pyhf/schema/validator.py
@@ -27,8 +27,8 @@ def validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):
# note: trailing slash needed for RefResolver to resolve correctly
resolver = jsonschema.RefResolver(
- base_uri=f"file://{variables.schemas}/",
- referrer=f"{version}/{schema_name}",
+ base_uri=f"file://{variables.schemas}/{version}/",
+ referrer=f"{schema_name}",
store=variables.SCHEMA_CACHE,
)
validator = jsonschema.Draft6Validator(
| jsonschema RefResolutionError for 0.6.3 or lower
### Summary
The newest version of `jsonschema` (4.15.0, released today) breaks the current release version of `pyhf` (0.6.3).
### OS / Environment
```console
NAME=Gentoo
ID=gentoo
PRETTY_NAME="Gentoo Linux"
ANSI_COLOR="1;32"
HOME_URL="https://www.gentoo.org/"
SUPPORT_URL="https://www.gentoo.org/support/"
BUG_REPORT_URL="https://bugs.gentoo.org/"
VERSION_ID="2.8"
```
### Steps to Reproduce
```bash
pip install 'jsonschema==4.15.0' 'pyhf==0.6.3'
```
```python
import pyhf
model = pyhf.simplemodels.uncorrelated_background(
signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
```
### File Upload (optional)
_No response_
### Expected Results
I expected not to get an error because this is the [Hello World example](https://pyhf.readthedocs.io/en/v0.6.3/examples/notebooks/hello-world.html). I confirmed that there's no error with `jsonschema==4.14.0` (the previous version) in the exact same environment otherwise.
### Actual Results
```console
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 889, in resolve_from_url
document = self.store[url]
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_utils.py", line 28, in __getitem__
return self.store[self.normalize(uri)]
KeyError: 'file:///home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1505, in open_local_file
stats = os.stat(localfile)
FileNotFoundError: [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 892, in resolve_from_url
document = self.resolve_remote(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 1000, in resolve_remote
with urlopen(uri) as url:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 216, in urlopen
return opener.open(url, data, timeout)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 519, in open
response = self._open(req, data)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 536, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 496, in _call_chain
result = func(*args)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1483, in file_open
return self.open_local_file(req)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1522, in open_local_file
raise URLError(exp)
urllib.error.URLError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/simplemodels.py", line 141, in uncorrelated_background
return Model(spec, batch_size=batch_size)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/pdf.py", line 682, in __init__
utils.validate(self.spec, self.schema, version=self.version)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/utils.py", line 62, in validate
return validator.validate(spec)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 302, in validate
for error in self.iter_errors(*args, **kwargs):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 277, in iter_errors
for error in errors:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_validators.py", line 294, in ref
scope, resolved = validator.resolver.resolve(ref)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 880, in resolve
return url, self._remote_cache(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 894, in resolve_from_url
raise exceptions.RefResolutionError(exc)
jsonschema.exceptions.RefResolutionError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
```
### pyhf Version
```console
pyhf, version 0.6.3
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| Something is off, there should be a `1.0.0` in there (for the version).
Yeah, I'm not sure what's going on there. Just confirmed I see it in `0.7.0rc1`, `0.7.0rc2`, and `master` as well.
Looks like the `4.15.0` changelog contains
```
* A specific API Reference page is now present in the documentation.
* ``$ref`` on earlier drafts (specifically draft 7 and 6) has been "fixed" to
follow the specified behavior when present alongside a sibling ``$id``.
Specifically the ID is now properly ignored, and references are resolved
against whatever resolution scope was previously relevant.
```
https://github.com/python-jsonschema/jsonschema/compare/v4.14.0...v4.15.0#diff-2c623f3c6a917be56c59d43279244996836262cb1e12d9d0786c9c49eef6b43c
We were following the jsonschema spec, so it seems the python jsonschema changed how it interprets paths.
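To make the path handling concrete, a small self-contained sketch (with made-up file URIs, not the actual pyhf schema store) of resolving a relative `$ref` against a version-scoped `base_uri` that ends in a trailing slash:
```python
import jsonschema

defs = {"definitions": {"positive": {"type": "number", "minimum": 0}}}
schema = {"$ref": "defs.json#/definitions/positive"}

resolver = jsonschema.RefResolver(
    base_uri="file:///schemas/1.0.0/",  # version directory plus trailing slash
    referrer=schema,
    store={"file:///schemas/1.0.0/defs.json": defs},
)
jsonschema.Draft6Validator(schema, resolver=resolver).validate(3.14)
print("relative $ref resolved against the versioned base URI")
```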
I'm checking to see if we catch this regression first in our head-of-dependencies (https://github.com/scikit-hep/pyhf/actions/runs/2966841468). | 2022-08-31T20:34:54 |
|
scikit-hep/pyhf | 1,985 | scikit-hep__pyhf-1985 | [
"1984"
] | e4aae10b53b8b20c70776bd9c540efda203c9264 | diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -429,6 +429,8 @@ def set_poi(self, name):
"""
Set the model parameter of interest to be model parameter ``name``.
+ If ``name`` is ``None``, this will unset the parameter of interest.
+
Example:
>>> import pyhf
>>> model = pyhf.simplemodels.uncorrelated_background(
@@ -438,6 +440,11 @@ def set_poi(self, name):
>>> model.config.poi_name
'mu'
"""
+ if name is None:
+ self._poi_name = None
+ self._poi_index = None
+ return
+
if name not in self.parameters:
raise exceptions.InvalidModel(
f"The parameter of interest '{name:s}' cannot be fit as it is not declared in the model specification."
| diff --git a/tests/test_public_api.py b/tests/test_public_api.py
--- a/tests/test_public_api.py
+++ b/tests/test_public_api.py
@@ -221,3 +221,15 @@ def test_set_schema_path_context(monkeypatch):
new_path = pathlib.Path('a/new/path')
with pyhf.schema(new_path):
assert pyhf.schema.path == new_path
+
+
+def test_pdf_set_poi(backend):
+ model = pyhf.simplemodels.uncorrelated_background([5.0], [10.0], [2.5])
+ assert model.config.poi_index == 0
+ assert model.config.poi_name == 'mu'
+ model.config.set_poi('uncorr_bkguncrt')
+ assert model.config.poi_index == 1
+ assert model.config.poi_name == 'uncorr_bkguncrt'
+ model.config.set_poi(None)
+ assert model.config.poi_index is None
+ assert model.config.poi_name is None
| Can no longer set several model config attributes
### Summary
The changes in #1972 cause `model.config.poi_index` to not be writable anymore. `cabinetry` sets this value directly to support running `pyhf.infer.hypotest` with a configurable POI (without the need to re-build the model).
### OS / Environment
```console
n/a
```
### Steps to Reproduce
<!--- Paste your minimal failing Python example code between the quotes below -->
```python (paste below)
import pyhf
model = pyhf.simplemodels.uncorrelated_background(
signal=[24.0, 22.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
model.config.poi_index = 1
```
### File Upload (optional)
_No response_
### Expected Results
assignment succeeds
### Actual Results
```console
Traceback (most recent call last):
File "test.py", line 7, in <module>
model.config.poi_index = ""
AttributeError: can't set attribute
```
### pyhf Version
```console
pyhf, version 0.7.0rc4.dev2
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| This affects also `model.config.poi_name` and presumably the rest of the attributes that were decorated with `@property`: `par_order`, `auxdata`, `nmaindata`, `nauxdata`, as well as `model.config` itself. I could imagine people possibly wanting to override `auxdata` and potentially the full `model.config`, I have not had the need to do so myself.
Adding the following two setters restores the functionality `cabinetry` uses at the moment:
```diff
diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
index db59fa1c..969b26db 100644
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -245,6 +245,13 @@ def poi_name(self):
"""
return self._poi_name
+ @poi_name.setter
+ def poi_name(self, value):
+ """
+ Set the name of the POI parameter in the model.
+ """
+ self._poi_name = value
+
@property
def poi_index(self):
"""
@@ -252,6 +259,13 @@ def poi_index(self):
"""
return self._poi_index
+ @poi_index.setter
+ def poi_index(self, value):
+ """
+ Set the index of the POI parameter in the model.
+ """
+ self._poi_index = value
+
@property
def auxdata(self):
"""
```
When this is fixed, a test should additionally be added to ensure that these can be set by the user.
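A sketch of the kind of test being asked for here, written against the `set_poi` route that the patch above ends up supporting (test name and values are illustrative):
```python
import pyhf

def test_poi_can_be_switched_and_unset():
    model = pyhf.simplemodels.uncorrelated_background([5.0], [10.0], [2.5])
    assert model.config.poi_name == "mu"
    model.config.set_poi("uncorr_bkguncrt")
    assert model.config.poi_index == 1
    model.config.set_poi(None)  # unsetting requires the patch above
    assert model.config.poi_index is None
```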
This is not a bug. The API for setting the `poi_index` is done through `model.config.set_poi(name: str)`. https://scikit-hep.org/pyhf/_generated/pyhf.pdf._ModelConfig.html#pyhf.pdf._ModelConfig.set_poi
We never encouraged anyone to override these other properties you mentioned either. The whole point is to try and make `Model` less dynamically configurable because there was always confusion about how mutable it was meant to be.
Making this explicit by using `set_poi` is a good solution, but it currently does not support setting POIs back to "no POI" mode where `poi_name` / `poi_index` are `None`. This worked when setting the attribute directly so far.
```python
import pyhf
spec = {
"channels": [
{
"name": "SR",
"samples": [
{
"data": [50],
"modifiers": [
{"data": None, "name": "mu", "type": "normfactor"},
],
"name": "Signal",
},
],
}
],
"measurements": [
{
"config": {
"parameters": [],
"poi": "",
},
"name": "meas",
}
],
"observations": [{"data": [50], "name": "SR"}],
"version": "1.0.0",
}
ws = pyhf.Workspace(spec)
model = ws.model()
data = ws.data(model)
print(model.config.poi_name is None) # True
print(model.config.poi_index is None) # True
model.config.set_poi("mu") # temporarily set for e.g. hypotest
pyhf.infer.hypotest(1.0, data, model)
# neither of the following works, expects string for a parameter that is in the model
# model.config.set_poi(None)
# model.config.set_poi("")
``` | 2022-09-05T16:28:40 |
scikit-hep/pyhf | 1,993 | scikit-hep__pyhf-1993 | [
"1994"
] | 287bfae60714d293fafef0ea55e15e043b8890a5 | diff --git a/src/pyhf/infer/calculators.py b/src/pyhf/infer/calculators.py
--- a/src/pyhf/infer/calculators.py
+++ b/src/pyhf/infer/calculators.py
@@ -418,8 +418,9 @@ def _false_case():
teststat = (qmu - qmu_A) / (2 * self.sqrtqmuA_v)
return teststat
+ # Use '<=' rather than '<' to avoid Issue #1992
teststat = tensorlib.conditional(
- (sqrtqmu_v < self.sqrtqmuA_v), _true_case, _false_case
+ (sqrtqmu_v <= self.sqrtqmuA_v), _true_case, _false_case
)
return tensorlib.astensor(teststat)
| diff --git a/src/pyhf/infer/test_statistics.py b/src/pyhf/infer/test_statistics.py
--- a/src/pyhf/infer/test_statistics.py
+++ b/src/pyhf/infer/test_statistics.py
@@ -71,7 +71,7 @@ def qmu(mu, data, pdf, init_pars, par_bounds, fixed_params, return_fitted_pars=F
\begin{equation}
q_{\mu} = \left\{\begin{array}{ll}
- -2\ln\lambda\left(\mu\right), &\hat{\mu} < \mu,\\
+ -2\ln\lambda\left(\mu\right), &\hat{\mu} \leq \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
@@ -160,7 +160,7 @@ def qmu_tilde(
\begin{equation}
\tilde{q}_{\mu} = \left\{\begin{array}{ll}
- -2\ln\tilde{\lambda}\left(\mu\right), &\hat{\mu} < \mu,\\
+ -2\ln\tilde{\lambda}\left(\mu\right), &\hat{\mu} \leq \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
diff --git a/tests/test_infer.py b/tests/test_infer.py
--- a/tests/test_infer.py
+++ b/tests/test_infer.py
@@ -476,3 +476,37 @@ def test_fixed_poi(tmpdir, hypotest_args):
pdf.config.param_set('mu').suggested_fixed = [True]
with pytest.raises(pyhf.exceptions.InvalidModel):
pyhf.infer.hypotest(*hypotest_args)
+
+
+def test_teststat_nan_guard():
+ # Example from Issue #1992
+ model = pyhf.simplemodels.uncorrelated_background(
+ signal=[1.0], bkg=[1.0], bkg_uncertainty=[1.0]
+ )
+ observations = [2]
+ test_poi = 0.0
+ data = observations + model.config.auxdata
+ init_pars = model.config.suggested_init()
+ par_bounds = model.config.suggested_bounds()
+ fixed_params = model.config.suggested_fixed()
+
+ test_stat = pyhf.infer.test_statistics.qmu_tilde(
+ test_poi, data, model, init_pars, par_bounds, fixed_params
+ )
+ assert test_stat == pytest.approx(0.0)
+ asymptotic_calculator = pyhf.infer.calculators.AsymptoticCalculator(
+ data, model, test_stat="qtilde"
+ )
+ # ensure not nan
+ assert ~np.isnan(asymptotic_calculator.teststatistic(test_poi))
+ assert asymptotic_calculator.teststatistic(test_poi) == pytest.approx(0.0)
+
+ # Example from Issue #529
+ model = pyhf.simplemodels.uncorrelated_background([0.005], [28.0], [5.0])
+ test_poi = 1.0
+ data = [28.0] + model.config.auxdata
+
+ test_results = pyhf.infer.hypotest(
+ test_poi, data, model, test_stat="qtilde", return_expected=True
+ )
+ assert all(~np.isnan(result) for result in test_results)
| Fix documentation for qmu_tilde test statistic
> Looking [at the docs](https://scikit-hep.org/pyhf/_generated/pyhf.infer.test_statistics.qmu_tilde.html), it seems $\hat{\mu}>\mu$ is the only time we can get $\tilde{q}_{\mu}=0$ (whether Asimov or not).
The docs don't actually match equation (16) in the paper, where the first line is $\hat{\mu} \leq \mu$.
_Originally posted by @masonproffitt in https://github.com/scikit-hep/pyhf/issues/1992#issuecomment-1241380149_
As it turns out, the docs don't match the code: https://github.com/scikit-hep/pyhf/blob/cbd68c829748fff11bf44adac2ad9e9b15068af2/src/pyhf/infer/test_statistics.py#L30-L32
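For reference, the corrected piecewise definition for $\tilde{q}_{\mu}$ applied in the patch above (matching Equation (16) of arXiv:1007.1727) reads:
```latex
\tilde{q}_{\mu} = \left\{\begin{array}{ll}
  -2\ln\tilde{\lambda}\left(\mu\right), & \hat{\mu} \leq \mu,\\
  0, & \hat{\mu} > \mu
\end{array}\right.
```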
| 2022-09-09T00:48:41 |
|
scikit-hep/pyhf | 2,017 | scikit-hep__pyhf-2017 | [
"2015"
] | f7e974f7c65524e5240c871154c00ba8e954f0bd | diff --git a/src/conftest.py b/src/conftest.py
new file mode 120000
--- /dev/null
+++ b/src/conftest.py
@@ -0,0 +1 @@
+../tests/conftest.py
\ No newline at end of file
| GitHub Actions only error: AttributeError("'NoneType' object has no attribute '_sorted_indices'")
### Summary
In PR #1274 there is a `AttributeError("'NoneType' object has no attribute '_sorted_indices'")` that is happening for Python 3.10 only for unclear reasons. This is blocking release `v0.7.0`. This is also not reproducible locally on my laptop.
### OS / Environment
```console
GitHub Actions on Python 3.10
```
### Steps to Reproduce
<!--- ...or if you have a failing CLI command paste it between the quotes below -->
```console (paste below)
$ pytest src/pyhf/tensor/jax_backend.py
```
### File Upload (optional)
_No response_
### Expected Results
For the CI to pass
### Actual Results
```pytb
=================================== FAILURES ===================================
____________ [doctest] pyhf.tensor.jax_backend.jax_backend.astensor ____________
207
208 Convert to a JAX ndarray.
209
210 Example:
211
212 >>> import pyhf
213 >>> pyhf.set_backend("jax")
UNEXPECTED EXCEPTION: AttributeError("'NoneType' object has no attribute '_sorted_indices'")
Traceback (most recent call last):
File "/Users/runner/hostedtoolcache/Python/3.10.7/x64/lib/python3.10/doctest.py", line 1350, in __run
exec(compile(example.source, filename, "single",
File "<doctest pyhf.tensor.jax_backend.jax_backend.astensor[1]>", line 1, in <module>
File "/Users/runner/work/pyhf/pyhf/src/pyhf/events.py", line 161, in register_wrapper
result = func(*args, **kwargs)
File "/Users/runner/work/pyhf/pyhf/src/pyhf/tensor/manager.py", line 192, in set_backend
events.trigger("tensorlib_changed")()
File "/Users/runner/work/pyhf/pyhf/src/pyhf/events.py", line 70, in __call__
func()(arg(), *args, **kwargs)
File "/Users/runner/work/pyhf/pyhf/src/pyhf/tensor/common.py", line 33, in _precompute
self.sorted_indices = tensorlib.astensor(self._sorted_indices, dtype='int')
AttributeError: 'NoneType' object has no attribute '_sorted_indices'
/Users/runner/work/pyhf/pyhf/src/pyhf/tensor/jax_backend.py:213: UnexpectedException
------------------------------ Captured log call -------------------------------
INFO absl:xla_bridge.py:174 Remote TPU is not linked into jax; skipping remote TPU.
INFO absl:xla_bridge.py:350 Unable to initialize backend 'tpu_driver': Could not initialize backend 'tpu_driver'
INFO absl:xla_bridge.py:350 Unable to initialize backend 'cuda': module 'jaxlib.xla_extension' has no attribute 'GpuAllocatorConfig'
INFO absl:xla_bridge.py:350 Unable to initialize backend 'rocm': module 'jaxlib.xla_extension' has no attribute 'GpuAllocatorConfig'
INFO absl:xla_bridge.py:350 Unable to initialize backend 'tpu': module 'jaxlib.xla_extension' has no attribute 'get_tpu_client'
```
### pyhf Version
```console
PR #1274
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| @kratsg @lukasheinrich If I run our CI from scratch inside of a `python:3.10` Docker container I am unable to reproduce the error, which makes me think we're seeing a transient issue with GitHub Actions and not something real on our side.
~~Should we just go ahead and merge PR #1274 and hope for the best while opening up some GitHub Issues with the virtual environment team?~~ Though this isn't happening on `master` at f7e974f7c65524e5240c871154c00ba8e954f0bd, so there is something in the PR #1274 code that is causing this on the GitHub Action runners. :?
Expanding on https://github.com/scikit-hep/pyhf/pull/1274#discussion_r975974433, this is able to be reproduced locally if one comments out one of the docstrings of any of the 3 functions under the `pyhf.infer.intervals.upper_limits` API
```diff
> git diff
diff --git a/src/pyhf/infer/intervals/upper_limits.py b/src/pyhf/infer/intervals/upper_limits.py
index 5cc93c13..c609091b 100644
--- a/src/pyhf/infer/intervals/upper_limits.py
+++ b/src/pyhf/infer/intervals/upper_limits.py
@@ -33,22 +33,22 @@ def toms748_scan(
Parameter of Interest (POI) using an automatic scan through
POI-space, using the :func:`~scipy.optimize.toms748` algorithm.
- Example:
- >>> import numpy as np
- >>> import pyhf
- >>> pyhf.set_backend("numpy")
- >>> model = pyhf.simplemodels.uncorrelated_background(
- ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
- ... )
- >>> observations = [51, 48]
- >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
- >>> obs_limit, exp_limits = pyhf.infer.intervals.upper_limits.toms748_scan(
- ... data, model, 0., 5., rtol=0.01
- ... )
- >>> obs_limit
- array(1.01156939)
- >>> exp_limits
- [array(0.5600747), array(0.75702605), array(1.06234693), array(1.50116923), array(2.05078912)]
+ # Example:
+ # >>> import numpy as np
+ # >>> import pyhf
+ # >>> pyhf.set_backend("numpy")
+ # >>> model = pyhf.simplemodels.uncorrelated_background(
+ # ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
+ # ... )
+ # >>> observations = [51, 48]
+ # >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
+ # >>> obs_limit, exp_limits = pyhf.infer.intervals.upper_limits.toms748_scan(
+ # ... data, model, 0., 5., rtol=0.01
+ # ... )
+ # >>> obs_limit
+ # array(1.01156939)
+ # >>> exp_limits
+ # [array(0.5600747), array(0.75702605), array(1.06234693), array(1.50116923), array(2.05078912)]
Args:
data (:obj:`tensor`): The observed data.
@@ -231,7 +231,6 @@ def upper_limit(
array(1.01764089)
>>> exp_limits
[array(0.59576921), array(0.76169166), array(1.08504773), array(1.50170482), array(2.06654952)]
- >>> pyhf.set_backend("jax")
Args:
data (:obj:`tensor`): The observed data.
```
and then run
```conosle
$ pytest \
src/pyhf/infer/intervals/ \
src/pyhf/tensor/jax_backend.py
================================================================================= test session starts =================================================================================
platform linux -- Python 3.10.6, pytest-7.1.3, pluggy-1.0.0
benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000)
Matplotlib: 3.6.0
Freetype: 2.6.1
rootdir: /home/feickert/Code/GitHub/pyhf, configfile: pyproject.toml
plugins: socket-0.5.1, console-scripts-1.3.1, benchmark-3.4.1, cov-3.0.0, mock-3.8.2, mpl-0.16.1, requests-mock-1.10.0, anyio-3.6.1
collected 18 items
src/pyhf/infer/intervals/upper_limits.py .. [ 11%]
src/pyhf/tensor/jax_backend.py F............... [100%]
====================================================================================== FAILURES =======================================================================================
_______________________________________________________________ [doctest] pyhf.tensor.jax_backend.jax_backend.astensor ________________________________________________________________
207
208 Convert to a JAX ndarray.
209
210 Example:
211
212 >>> import pyhf
213 >>> pyhf.set_backend("jax")
UNEXPECTED EXCEPTION: AttributeError("'NoneType' object has no attribute '_sorted_indices'")
Traceback (most recent call last):
File "/home/feickert/.pyenv/versions/3.10.6/lib/python3.10/doctest.py", line 1350, in __run
exec(compile(example.source, filename, "single",
File "<doctest pyhf.tensor.jax_backend.jax_backend.astensor[1]>", line 1, in <module>
File "/home/feickert/Code/GitHub/pyhf/src/pyhf/events.py", line 161, in register_wrapper
result = func(*args, **kwargs)
File "/home/feickert/Code/GitHub/pyhf/src/pyhf/tensor/manager.py", line 192, in set_backend
events.trigger("tensorlib_changed")()
File "/home/feickert/Code/GitHub/pyhf/src/pyhf/events.py", line 70, in __call__
func()(arg(), *args, **kwargs)
File "/home/feickert/Code/GitHub/pyhf/src/pyhf/tensor/common.py", line 33, in _precompute
self.sorted_indices = tensorlib.astensor(self._sorted_indices, dtype='int')
AttributeError: 'NoneType' object has no attribute '_sorted_indices'
/home/feickert/Code/GitHub/pyhf/src/pyhf/tensor/jax_backend.py:213: UnexpectedException
---------------------------------------------------------------------------------- Captured log call ----------------------------------------------------------------------------------
INFO absl:xla_bridge.py:350 Unable to initialize backend 'tpu_driver': NOT_FOUND: Unable to find driver in registry given worker:
INFO absl:xla_bridge.py:350 Unable to initialize backend 'cuda': module 'jaxlib.xla_extension' has no attribute 'GpuAllocatorConfig'
INFO absl:xla_bridge.py:350 Unable to initialize backend 'rocm': module 'jaxlib.xla_extension' has no attribute 'GpuAllocatorConfig'
INFO absl:xla_bridge.py:350 Unable to initialize backend 'tpu': INVALID_ARGUMENT: TpuPlatform is not available.
WARNING absl:xla_bridge.py:357 No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
```
I think this might mean we have a bug in [`pyhf.tensor.manager`](https://github.com/scikit-hep/pyhf/blob/f7e974f7c65524e5240c871154c00ba8e954f0bd/src/pyhf/tensor/manager.py) or in how
https://github.com/scikit-hep/pyhf/blob/f7e974f7c65524e5240c871154c00ba8e954f0bd/tests/conftest.py#L49-L56
is used by doctest. Not sure, so input welcome. | 2022-09-21T05:03:32 |
|
scikit-hep/pyhf | 2,027 | scikit-hep__pyhf-2027 | [
"1700"
] | cccbda16e56b34150b55489f2d172d492f879209 | diff --git a/src/pyhf/optimize/mixins.py b/src/pyhf/optimize/mixins.py
--- a/src/pyhf/optimize/mixins.py
+++ b/src/pyhf/optimize/mixins.py
@@ -181,7 +181,7 @@ def minimize(
# handle non-pyhf ModelConfigs
try:
- par_names = pdf.config.par_names()
+ par_names = pdf.config.par_names
except AttributeError:
par_names = None
diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -358,6 +358,7 @@ def par_slice(self, name):
"""
return self.par_map[name]['slice']
+ @property
def par_names(self):
"""
The names of the parameters in the model including binned-parameter indexing.
@@ -370,8 +371,10 @@ def par_names(self):
>>> model = pyhf.simplemodels.uncorrelated_background(
... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
... )
- >>> model.config.par_names()
+ >>> model.config.par_names
['mu', 'uncorr_bkguncrt[0]', 'uncorr_bkguncrt[1]']
+
+ .. versionchanged:: 0.7.0 Changed from method to property attribute.
"""
_names = []
for name in self.par_order:
| diff --git a/tests/test_optim.py b/tests/test_optim.py
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -1,3 +1,4 @@
+from unittest.mock import patch, PropertyMock
import pyhf
from pyhf.optimize.mixins import OptimizerMixin
from pyhf.optimize.common import _get_tensor_shim, _make_stitch_pars
@@ -576,7 +577,10 @@ def test_minuit_param_names(mocker):
assert 'minuit' in result
assert result.minuit.parameters == ('mu', 'uncorr_bkguncrt[0]')
- pdf.config.par_names = mocker.Mock(return_value=None)
- _, result = pyhf.infer.mle.fit(data, pdf, return_result_obj=True)
- assert 'minuit' in result
- assert result.minuit.parameters == ('x0', 'x1')
+ with patch(
+ "pyhf.pdf._ModelConfig.par_names", new_callable=PropertyMock
+ ) as mock_par_names:
+ mock_par_names.return_value = None
+ _, result = pyhf.infer.mle.fit(data, pdf, return_result_obj=True)
+ assert "minuit" in result
+ assert result.minuit.parameters == ("x0", "x1")
diff --git a/tests/test_pdf.py b/tests/test_pdf.py
--- a/tests/test_pdf.py
+++ b/tests/test_pdf.py
@@ -957,7 +957,7 @@ def test_par_names_scalar_nonscalar():
model = pyhf.Model(spec, poi_name="scalar")
assert model.config.par_order == ["scalar", "nonscalar"]
- assert model.config.par_names() == [
+ assert model.config.par_names == [
'scalar',
'nonscalar[0]',
]
@@ -1159,7 +1159,7 @@ def test_pdf_clipping(backend):
model = ws.model()
data = tensorlib.astensor([100.0, 100.0, 10.0, 0.0, 0.0])
- for par_name in model.config.par_names():
+ for par_name in model.config.par_names:
if "np" in par_name:
par_values.append(-0.6) # np_1 / np_2
else:
diff --git a/tests/test_simplemodels.py b/tests/test_simplemodels.py
--- a/tests/test_simplemodels.py
+++ b/tests/test_simplemodels.py
@@ -18,7 +18,7 @@ def test_correlated_background(backend):
assert model.config.channels == ["single_channel"]
assert model.config.samples == ["background", "signal"]
assert model.config.par_order == ["correlated_bkg_uncertainty", "mu"]
- assert model.config.par_names() == ['correlated_bkg_uncertainty', "mu"]
+ assert model.config.par_names == ["correlated_bkg_uncertainty", "mu"]
assert model.config.suggested_init() == [0.0, 1.0]
@@ -29,7 +29,7 @@ def test_uncorrelated_background(backend):
assert model.config.channels == ["singlechannel"]
assert model.config.samples == ["background", "signal"]
assert model.config.par_order == ["mu", "uncorr_bkguncrt"]
- assert model.config.par_names() == [
+ assert model.config.par_names == [
'mu',
'uncorr_bkguncrt[0]',
'uncorr_bkguncrt[1]',
@@ -52,7 +52,7 @@ def test_correlated_background_default_backend(default_backend):
assert model.config.channels == ["single_channel"]
assert model.config.samples == ["background", "signal"]
assert model.config.par_order == ["correlated_bkg_uncertainty", "mu"]
- assert model.config.par_names() == ['correlated_bkg_uncertainty', "mu"]
+ assert model.config.par_names == ["correlated_bkg_uncertainty", "mu"]
assert model.config.suggested_init() == [0.0, 1.0]
@@ -68,7 +68,7 @@ def test_uncorrelated_background_default_backend(default_backend):
assert model.config.channels == ["singlechannel"]
assert model.config.samples == ["background", "signal"]
assert model.config.par_order == ["mu", "uncorr_bkguncrt"]
- assert model.config.par_names() == [
+ assert model.config.par_names == [
'mu',
'uncorr_bkguncrt[0]',
'uncorr_bkguncrt[1]',
| Make `model.config.par_names()` a property?
### Summary
Currently [`model.config.par_names()`](https://github.com/scikit-hep/pyhf/blob/767ed59d06274f810e47944f1fe4c95d94968543/src/pyhf/pdf.py#L304-L329) takes no arguments and returns a list of parameter names. The behavior slightly differs from `model.config.channels` and `model.config.samples`, which are properties. For consistency, it may be useful to turn `par_names` into a property as well.
### Additional Information
`model.config.par_names()` used to take an argument to set formatting, which required it to be a function. Since that has been dropped in the meantime, this opens up the possibility of using `@property`.
I don't know whether both `par_names` and `par_names()` can be supported simultaneously, which would avoid breaking existing workflows that use `par_names()`, but it may be better to only have a single API regardless.
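For illustration only (a hypothetical sketch, not the implementation that was adopted, which is property-only), one way to support both access styles during a transition would be a callable list returned by the property:

```python
# Hypothetical sketch: a list subclass that is also callable, so both
# `config.par_names` and `config.par_names()` keep working during a deprecation
# period (a DeprecationWarning for the call form could be emitted in __call__).
class _CallableList(list):
    def __call__(self):
        return self


class DemoConfig:
    @property
    def par_names(self):
        return _CallableList(["mu", "uncorr_bkguncrt[0]", "uncorr_bkguncrt[1]"])


config = DemoConfig()
assert config.par_names == ["mu", "uncorr_bkguncrt[0]", "uncorr_bkguncrt[1]"]
assert config.par_names() == config.par_names
```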
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| Similarly to this, `suggested_init`, `suggested_bounds` and `suggested_fixed` currently take no arguments either. One other consideration is that maybe some of these parts of the API are meant to eventually support kwargs, in which case switching to a property would not be useful. | 2022-09-23T19:48:33 |
scikit-hep/pyhf | 2,031 | scikit-hep__pyhf-2031 | [
"2029"
] | 931dd36cf573c4dbb4853494da18f4df07910af1 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -500,7 +500,9 @@ def setup(app):
r'https://doi\.org/10\.31526/.*',
# https://doi.org/10.1051/epjconf/x DOI URLs will periodically generate 500 Server Error
r'https://doi\.org/10\.1051/epjconf/.*',
- # tags for a release won't exist until it is made, but the release notes need to reference them
+ # tags for a release won't exist until it is made, but the release notes
+ # and ReadTheDocs need to reference them
r'https://github.com/scikit-hep/pyhf/releases/tag/.*',
+ r'https://pyhf.readthedocs.io/en/.*',
]
linkcheck_retries = 50
| Add `https://pyhf.readthedocs.io/en/v` to linkcheck_ignore
During a release the link check will report
```
( faq: line 13) broken https://pyhf.readthedocs.io/ - 404 Client Error: Not Found for url: https://pyhf.readthedocs.io/en/v0.7.0/
```
As that versioned URL doesn't exist yet, it should be added to
https://github.com/scikit-hep/pyhf/blob/931dd36cf573c4dbb4853494da18f4df07910af1/docs/conf.py#L497-L505
like we did with `https://github.com/scikit-hep/pyhf/releases/tag/.*` in PR #1705.
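For reference, a condensed sketch of the resulting ignore list (this just restates the patch above):

```python
# docs/conf.py: regex patterns for URLs that only exist once the release is out
linkcheck_ignore = [
    # tags for a release won't exist until it is made
    r"https://github.com/scikit-hep/pyhf/releases/tag/.*",
    # versioned ReadTheDocs builds don't exist until the release is published
    r"https://pyhf.readthedocs.io/en/.*",
]
```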
| 2022-09-24T19:21:47 |
||
scikit-hep/pyhf | 2,068 | scikit-hep__pyhf-2068 | [
"2067"
] | f6d1997ebaf102214ff5b26a632ec760311ce55e | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,6 +58,7 @@
'sphinx-issues',
'sphinx-copybutton>=0.3.2',
'sphinx-togglebutton>=0.3.0',
+ 'ipython!=8.7.0', # c.f. https://github.com/scikit-hep/pyhf/pull/2068
]
)
)
| docs build failing on Pygments lexer warning
Hm. Something related to https://github.com/spatialaudio/nbsphinx/issues/24 is breaking the docs build. We're getting
```pytb
WARNING: Pygments lexer name 'ipython3' is not known
```
for all the notebooks during the docs build and we fail on warnings.
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/2066#issuecomment-1329937208_
| Given that
```console
$ python -m pip show Pygments | grep Required-by
Required-by: ipython, jupyter-console, nbconvert, nbdime, qtconsole, readme-renderer, rich, Sphinx
```
and between the last working build of the docs on 2022-11-17 and now (2022-11-28) the difference in installed dependencies includes
```diff
$ diff --color -u /tmp/pass.txt /tmp/fail.txt
...
-ipykernel 6.17.1
-ipython 8.6.0
+ipykernel 6.18.1
+ipython 8.7.0
```
I'm going to assume there is something different with `ipython` and look there.
This shows up in https://github.com/ipython/ipython/issues/13845 as well, so I think that this is going to exist for a bit and that PR #2066 should get merged in while this all gets figured out.
It does seem that https://github.com/ipython/ipython/issues/13845#issuecomment-1329226997 is correct though about
> this is due to the Pygments entry points not being correctly installed with ~8.6.0~ 8.7.0 as
```console
$ python -m pip list | grep 'setuptools\|ipython'
ipython 8.7.0
ipython-genutils 0.2.0
setuptools 65.6.3
$ python -c 'from importlib.metadata import entry_points; print(entry_points(group="pygments.lexers"))'
[]
$ python -m pip install --upgrade 'ipython==8.6.0'
$ python -m pip list | grep 'setuptools\|ipython'
ipython 8.6.0
ipython-genutils 0.2.0
setuptools 65.6.3
$ python -c 'from importlib.metadata import entry_points; print(entry_points(group="pygments.lexers"))'
[EntryPoint(name='ipython', value='IPython.lib.lexers:IPythonLexer', group='pygments.lexers'), EntryPoint(name='ipython3', value='IPython.lib.lexers:IPython3Lexer', group='pygments.lexers'), EntryPoint(name='ipythonconsole', value='IPython.lib.lexers:IPythonConsoleLexer', group='pygments.lexers')]
```
so to get around this just disallow `ipython` `v8.7.0` for the time being
```
ipython!=8.7.0
``` | 2022-11-29T02:16:19 |
|
scikit-hep/pyhf | 2,079 | scikit-hep__pyhf-2079 | [
"2078"
] | d9a5da255d3c0b1f902bac3e0e155fbba2766232 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
'tensorflow-probability>=0.11.0', # c.f. PR #1657
],
'torch': ['torch>=1.10.0'], # c.f. PR #1657
- 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501
+ 'jax': ['jax>=0.4.1', 'jaxlib>=0.4.1'], # c.f. PR #2079
'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567
'minuit': ['iminuit>=2.7.0'], # c.f. PR #1895
}
diff --git a/src/pyhf/tensor/jax_backend.py b/src/pyhf/tensor/jax_backend.py
--- a/src/pyhf/tensor/jax_backend.py
+++ b/src/pyhf/tensor/jax_backend.py
@@ -2,6 +2,7 @@
config.update('jax_enable_x64', True)
+from jax import Array
import jax.numpy as jnp
from jax.scipy.special import gammaln, xlogy
from jax.scipy import special
@@ -54,10 +55,10 @@ class jax_backend:
__slots__ = ['name', 'precision', 'dtypemap', 'default_do_grad']
#: The array type for jax
- array_type = jnp.DeviceArray
+ array_type = Array
#: The array content type for jax
- array_subtype = jnp.DeviceArray
+ array_subtype = Array
def __init__(self, **kwargs):
self.name = 'jax'
@@ -84,7 +85,7 @@ def clip(self, tensor_in, min_value, max_value):
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([-2, -1, 0, 1, 2])
>>> pyhf.tensorlib.clip(a, -1, 1)
- DeviceArray([-1., -1., 0., 1., 1.], dtype=float64)
+ Array([-1., -1., 0., 1., 1.], dtype=float64)
Args:
tensor_in (:obj:`tensor`): The input tensor object
@@ -106,8 +107,7 @@ def erf(self, tensor_in):
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([-2., -1., 0., 1., 2.])
>>> pyhf.tensorlib.erf(a)
- DeviceArray([-0.99532227, -0.84270079, 0. , 0.84270079,
- 0.99532227], dtype=float64)
+ Array([-0.99532227, -0.84270079, 0. , 0.84270079, 0.99532227], dtype=float64)
Args:
tensor_in (:obj:`tensor`): The input tensor object
@@ -127,7 +127,7 @@ def erfinv(self, tensor_in):
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([-2., -1., 0., 1., 2.])
>>> pyhf.tensorlib.erfinv(pyhf.tensorlib.erf(a))
- DeviceArray([-2., -1., 0., 1., 2.], dtype=float64)
+ Array([-2., -1., 0., 1., 2.], dtype=float64)
Args:
tensor_in (:obj:`tensor`): The input tensor object
@@ -147,8 +147,8 @@ def tile(self, tensor_in, repeats):
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([[1.0], [2.0]])
>>> pyhf.tensorlib.tile(a, (1, 2))
- DeviceArray([[1., 1.],
- [2., 2.]], dtype=float64)
+ Array([[1., 1.],
+ [2., 2.]], dtype=float64)
Args:
tensor_in (:obj:`tensor`): The tensor to be repeated
@@ -171,7 +171,7 @@ def conditional(self, predicate, true_callable, false_callable):
>>> a = tensorlib.astensor([4])
>>> b = tensorlib.astensor([5])
>>> tensorlib.conditional((a < b)[0], lambda: a + b, lambda: a - b)
- DeviceArray([9.], dtype=float64)
+ Array([9.], dtype=float64)
Args:
predicate (:obj:`scalar`): The logical condition that determines which callable to evaluate
@@ -213,16 +213,16 @@ def astensor(self, tensor_in, dtype="float"):
>>> pyhf.set_backend("jax")
>>> tensor = pyhf.tensorlib.astensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> tensor
- DeviceArray([[1., 2., 3.],
- [4., 5., 6.]], dtype=float64)
+ Array([[1., 2., 3.],
+ [4., 5., 6.]], dtype=float64)
>>> type(tensor) # doctest:+ELLIPSIS
- <class '...DeviceArray'>
+ <class '...Array'>
Args:
tensor_in (Number or Tensor): Tensor object
Returns:
- `jaxlib.xla_extension.DeviceArray`: A multi-dimensional, fixed-size homogeneous array.
+ `jaxlib.xla_extension.Array`: A multi-dimensional, fixed-size homogeneous array.
"""
# TODO: Remove doctest:+ELLIPSIS when JAX API stabilized
try:
@@ -294,9 +294,9 @@ def percentile(self, tensor_in, q, axis=None, interpolation="linear"):
>>> pyhf.set_backend("jax")
>>> a = pyhf.tensorlib.astensor([[10, 7, 4], [3, 2, 1]])
>>> pyhf.tensorlib.percentile(a, 50)
- DeviceArray(3.5, dtype=float64)
+ Array(3.5, dtype=float64)
>>> pyhf.tensorlib.percentile(a, 50, axis=1)
- DeviceArray([7., 2.], dtype=float64)
+ Array([7., 2.], dtype=float64)
Args:
tensor_in (`tensor`): The tensor containing the data
@@ -355,7 +355,7 @@ def simple_broadcast(self, *args):
... pyhf.tensorlib.astensor([1]),
... pyhf.tensorlib.astensor([2, 3, 4]),
... pyhf.tensorlib.astensor([5, 6, 7]))
- [DeviceArray([1., 1., 1.], dtype=float64), DeviceArray([2., 3., 4.], dtype=float64), DeviceArray([5., 6., 7.], dtype=float64)]
+ [Array([1., 1., 1.], dtype=float64), Array([2., 3., 4.], dtype=float64), Array([5., 6., 7.], dtype=float64)]
Args:
args (Array of Tensors): Sequence of arrays
@@ -381,13 +381,13 @@ def ravel(self, tensor):
>>> pyhf.set_backend("jax")
>>> tensor = pyhf.tensorlib.astensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> pyhf.tensorlib.ravel(tensor)
- DeviceArray([1., 2., 3., 4., 5., 6.], dtype=float64)
+ Array([1., 2., 3., 4., 5., 6.], dtype=float64)
Args:
tensor (Tensor): Tensor object
Returns:
- `jaxlib.xla_extension.DeviceArray`: A flattened array.
+ `jaxlib.xla_extension.Array`: A flattened array.
"""
return jnp.ravel(tensor)
@@ -441,11 +441,11 @@ def poisson(self, n, lam):
>>> import pyhf
>>> pyhf.set_backend("jax")
>>> pyhf.tensorlib.poisson(5., 6.)
- DeviceArray(0.16062314, dtype=float64, weak_type=True)
+ Array(0.16062314, dtype=float64, weak_type=True)
>>> values = pyhf.tensorlib.astensor([5., 9.])
>>> rates = pyhf.tensorlib.astensor([6., 8.])
>>> pyhf.tensorlib.poisson(values, rates)
- DeviceArray([0.16062314, 0.12407692], dtype=float64)
+ Array([0.16062314, 0.12407692], dtype=float64)
Args:
n (:obj:`tensor` or :obj:`float`): The value at which to evaluate the approximation to the Poisson distribution p.m.f.
@@ -484,12 +484,12 @@ def normal(self, x, mu, sigma):
>>> import pyhf
>>> pyhf.set_backend("jax")
>>> pyhf.tensorlib.normal(0.5, 0., 1.)
- DeviceArray(0.35206533, dtype=float64, weak_type=True)
+ Array(0.35206533, dtype=float64, weak_type=True)
>>> values = pyhf.tensorlib.astensor([0.5, 2.0])
>>> means = pyhf.tensorlib.astensor([0., 2.3])
>>> sigmas = pyhf.tensorlib.astensor([1., 0.8])
>>> pyhf.tensorlib.normal(values, means, sigmas)
- DeviceArray([0.35206533, 0.46481887], dtype=float64)
+ Array([0.35206533, 0.46481887], dtype=float64)
Args:
x (:obj:`tensor` or :obj:`float`): The value at which to evaluate the Normal distribution p.d.f.
@@ -510,10 +510,10 @@ def normal_cdf(self, x, mu=0, sigma=1):
>>> import pyhf
>>> pyhf.set_backend("jax")
>>> pyhf.tensorlib.normal_cdf(0.8)
- DeviceArray(0.7881446, dtype=float64)
+ Array(0.7881446, dtype=float64)
>>> values = pyhf.tensorlib.astensor([0.8, 2.0])
>>> pyhf.tensorlib.normal_cdf(values)
- DeviceArray([0.7881446 , 0.97724987], dtype=float64)
+ Array([0.7881446 , 0.97724987], dtype=float64)
Args:
x (:obj:`tensor` or :obj:`float`): The observed value of the random variable to evaluate the CDF for
@@ -536,7 +536,7 @@ def poisson_dist(self, rate):
>>> values = pyhf.tensorlib.astensor([4, 9])
>>> poissons = pyhf.tensorlib.poisson_dist(rates)
>>> poissons.log_prob(values)
- DeviceArray([-1.74030218, -2.0868536 ], dtype=float64)
+ Array([-1.74030218, -2.0868536 ], dtype=float64)
Args:
rate (:obj:`tensor` or :obj:`float`): The mean of the Poisson distribution (the expected number of events)
@@ -558,7 +558,7 @@ def normal_dist(self, mu, sigma):
>>> values = pyhf.tensorlib.astensor([4, 9])
>>> normals = pyhf.tensorlib.normal_dist(means, stds)
>>> normals.log_prob(values)
- DeviceArray([-1.41893853, -2.22579135], dtype=float64)
+ Array([-1.41893853, -2.22579135], dtype=float64)
Args:
mu (:obj:`tensor` or :obj:`float`): The mean of the Normal distribution
@@ -579,8 +579,8 @@ def to_numpy(self, tensor_in):
>>> pyhf.set_backend("jax")
>>> tensor = pyhf.tensorlib.astensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> tensor
- DeviceArray([[1., 2., 3.],
- [4., 5., 6.]], dtype=float64)
+ Array([[1., 2., 3.],
+ [4., 5., 6.]], dtype=float64)
>>> numpy_ndarray = pyhf.tensorlib.to_numpy(tensor)
>>> numpy_ndarray
array([[1., 2., 3.],
@@ -606,12 +606,12 @@ def transpose(self, tensor_in):
>>> pyhf.set_backend("jax")
>>> tensor = pyhf.tensorlib.astensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> tensor
- DeviceArray([[1., 2., 3.],
- [4., 5., 6.]], dtype=float64)
+ Array([[1., 2., 3.],
+ [4., 5., 6.]], dtype=float64)
>>> pyhf.tensorlib.transpose(tensor)
- DeviceArray([[1., 4.],
- [2., 5.],
- [3., 6.]], dtype=float64)
+ Array([[1., 4.],
+ [2., 5.],
+ [3., 6.]], dtype=float64)
Args:
tensor_in (:obj:`tensor`): The input tensor object.
| diff --git a/tests/constraints.txt b/tests/constraints.txt
--- a/tests/constraints.txt
+++ b/tests/constraints.txt
@@ -1,5 +1,5 @@
# core
-scipy==1.3.2 # c.f. PR #2044
+scipy==1.5.0 # c.f. PR #2079
click==8.0.0 # c.f. PR #1958, #1909
tqdm==4.56.0
jsonschema==4.15.0 # c.f. PR #1979
@@ -19,5 +19,5 @@ torch==1.10.0
# Use Google Cloud Storage buckets for long term wheel support
# c.f. https://github.com/google/jax/discussions/7608#discussioncomment-1269342
--find-links https://storage.googleapis.com/jax-releases/jax_releases.html
-jax==0.2.10
-jaxlib==0.1.61 # c.f. PR #1962
+jax==0.4.1 # c.f. PR #2079
+jaxlib==0.4.1 # c.f. PR #2079
| jax v0.4.1's jax.Array breaks schema
In [JAX v0.4.1](https://github.com/google/jax/releases/tag/jax-v0.4.1)
> We introduce `jax.Array` which is a unified array type that subsumes `DeviceArray`, `ShardedDeviceArray`, and `GlobalDeviceArray` types in JAX. The `jax.Array` type helps make parallelism a core feature of JAX, simplifies and unifies JAX internals, and allows us to unify `jit` and `pjit`. `jax.Array` has been enabled by default in JAX 0.4 and makes some breaking change to the `pjit` API. The [`jax.Array` migration guide](https://jax.readthedocs.io/en/latest/jax_array_migration.html) can help you migrate your codebase to `jax.Array`. You can also look at the [Distributed arrays and automatic parallelization](https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html) tutorial to understand the new concepts.
This causes [`test_schema_tensor_type_allowed`](https://github.com/scikit-hep/pyhf/blob/d9a5da255d3c0b1f902bac3e0e155fbba2766232/tests/test_schema.py#L649) to fail
```console
$ pytest tests/test_schema.py -k 'test_schema_tensor_type_allowed[jax]'
...
> raise pyhf.exceptions.InvalidSpecification(err, schema_name)
E pyhf.exceptions.InvalidSpecification: Array([10.], dtype=float64) is not of type 'array'.
E Path: channels[0].samples[0].data
E Instance: [10.] Schema: model.json
```
As this is failing on schema tests this might require a patch release.
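A minimal check of the underlying change (assumes `jax>=0.4.1` is installed):

```python
# With jax>=0.4.1 concrete arrays are instances of the unified jax.Array type,
# so any isinstance-based tensor handling has to target jax.Array instead of
# the removed DeviceArray.
import jax.numpy as jnp
from jax import Array

tensor = jnp.asarray([10.0])
assert isinstance(tensor, Array)
```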
| 2022-12-14T23:15:19 |
|
scikit-hep/pyhf | 2,135 | scikit-hep__pyhf-2135 | [
"2132"
] | 48e7f28ebdbdb2efb2fb4caaefe3e0ae603394a5 | diff --git a/docs/generate_jupyterlite_iframe.py b/docs/generate_jupyterlite_iframe.py
--- a/docs/generate_jupyterlite_iframe.py
+++ b/docs/generate_jupyterlite_iframe.py
@@ -4,7 +4,7 @@
def main():
code = """\
import piplite
-await piplite.install(["pyhf==0.7.0"])
+await piplite.install(["pyhf==0.7.0", "matplotlib>=3.0.0"])
%matplotlib inline
import pyhf\
"""
| Matplotlib broken in Pyodide demo in docs
In the docs https://pyhf.readthedocs.io/en/v0.7.0/, the Pyodide example is broken for me:
```pytb
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
Cell In[1], line 3
1 import piplite
2 await piplite.install(["pyhf==0.7.0"])
----> 3 get_ipython().run_line_magic('matplotlib', 'inline')
4 import pyhf
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:2369, in InteractiveShell.run_line_magic(self, magic_name, line, _stack_depth)
2367 kwargs['local_ns'] = self.get_local_scope(stack_depth)
2368 with self.builtin_trap:
-> 2369 result = fn(*args, **kwargs)
2371 # The code below prevents the output from being displayed
2372 # when using magics with decodator @output_can_be_silenced
2373 # when the last Python token in the expression is a ';'.
2374 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
File /lib/python3.10/site-packages/IPython/core/magics/pylab.py:99, in PylabMagics.matplotlib(self, line)
97 print("Available matplotlib backends: %s" % backends_list)
98 else:
---> 99 gui, backend = self.shell.enable_matplotlib(args.gui.lower() if isinstance(args.gui, str) else args.gui)
100 self._show_matplotlib_backend(args.gui, backend)
File /lib/python3.10/site-packages/IPython/core/interactiveshell.py:3540, in InteractiveShell.enable_matplotlib(self, gui)
3519 def enable_matplotlib(self, gui=None):
3520 """Enable interactive matplotlib and inline figure support.
3521
3522 This takes the following steps:
(...)
3538 display figures inline.
3539 """
-> 3540 from matplotlib_inline.backend_inline import configure_inline_support
3542 from IPython.core import pylabtools as pt
3543 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
File /lib/python3.10/site-packages/matplotlib_inline/__init__.py:1
----> 1 from . import backend_inline, config # noqa
2 __version__ = "0.1.6" # noqa
File /lib/python3.10/site-packages/matplotlib_inline/backend_inline.py:6
1 """A matplotlib backend for publishing figures via display_data"""
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the BSD 3-Clause License.
----> 6 import matplotlib
7 from matplotlib import colors
8 from matplotlib.backends import backend_agg
ModuleNotFoundError: The module 'matplotlib' is included in the Pyodide distribution, but it is not installed.
You can install it by calling:
await micropip.install("matplotlib") in Python, or
await pyodide.loadPackage("matplotlib") in JavaScript
See https://pyodide.org/en/stable/usage/loading-packages.html for more details.
```
It used to work previously, though I can not say for sure when it last worked for me. Running on MacOS (ARM), tried Firefox and Chrome (resulting in the above), while Safari seems stuck executing the import commands provided.
| Ah that's unfortunate. Thanks for reporting @alexander-held. It is similarly broken for me. :(
I'll have to dig into this over the weekend.
cc @henryiii given his involvement in Pyodide in general to see if he has any prior knowledge on what happened here.
When did it break? Guessing it was a Jupyterlite update? That was a little bit ago, though (something like https://jupyterlite.readthedocs.io/en/0.1.0-beta/changelog.html#b17). Also, I think a solution might be to include matplotlib in the list of things you install.
Might have been an update to the demo site, which I think you are using? https://github.com/jupyterlite/jupyterlite/commit/ef71996020ceb25cde6829f099740f50edfc297a was recent.
FYI, I'd probably recommend building Jupyterlite yourself, rather than using a demo site that is not stable. Something like https://github.com/henryiii/se-for-sci/blob/b97673d82361f1be50c1519efac2a1de50e1e157/noxfile.py#L8-L15, for example.
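A hypothetical sketch of that kind of nox session (the session name, package list, and output path here are assumptions, not the contents of the linked file):

```python
# Hypothetical nox session for building a JupyterLite site locally instead of
# relying on the unversioned demo deployment; exact CLI flags are assumed, see
# the jupyterlite documentation for the authoritative options.
import nox


@nox.session(reuse_venv=True)
def jupyterlite(session):
    session.install("jupyterlite-core", "jupyterlite-pyodide-kernel")
    session.run("jupyter", "lite", "build", "--output-dir", "_site/lite")
```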
> I think a solution might be to include matplotlib in the list of things you install.
Yeah, that definitely works. | 2023-03-13T06:39:10 |
|
scikit-hep/pyhf | 2,197 | scikit-hep__pyhf-2197 | [
"2200"
] | 22c1699f044f798f2d0fe904d336f3ce9ee50b92 | diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -464,10 +464,13 @@ def set_poi(self, name):
raise exceptions.InvalidModel(
f"The parameter of interest '{name:s}' cannot be fit as it is not declared in the model specification."
)
- s = self.par_slice(name)
- assert s.stop - s.start == 1
+ if self.param_set(name).n_parameters > 1:
+ # multi-parameter modifiers are not supported as POIs
+ raise exceptions.InvalidModel(
+ f"The parameter '{name:s}' contains multiple components and is not currently supported as parameter of interest."
+ )
self._poi_name = name
- self._poi_index = s.start
+ self._poi_index = self.par_slice(name).start
def _create_and_register_paramsets(self, required_paramsets):
next_index = 0
| diff --git a/tests/test_pdf.py b/tests/test_pdf.py
--- a/tests/test_pdf.py
+++ b/tests/test_pdf.py
@@ -1329,3 +1329,33 @@ def test_is_shared_paramset_shapesys_same_sample_same_channel():
with pytest.raises(pyhf.exceptions.InvalidModel):
pyhf.Workspace(spec).model()
+
+
+def test_multi_component_poi():
+ spec = {
+ "channels": [
+ {
+ "name": "SR",
+ "samples": [
+ {
+ "data": [5.0, 10.0],
+ "modifiers": [
+ {"data": None, "name": "mu", "type": "shapefactor"}
+ ],
+ "name": "Signal",
+ }
+ ],
+ }
+ ],
+ "measurements": [
+ {"config": {"parameters": [], "poi": "mu"}, "name": "example"}
+ ],
+ "observations": [{"data": [5.0, 10.0], "name": "SR"}],
+ "version": "1.0.0",
+ }
+
+ with pytest.raises(
+ pyhf.exceptions.InvalidModel,
+ match="The parameter 'mu' contains multiple components and is not currently supported as parameter of interest.",
+ ):
+ pyhf.Workspace(spec).model()
| `shapefactor` as parameter of interest
### Discussed in https://github.com/scikit-hep/pyhf/discussions/2194
<div type='discussions-op-text'>
<sup>Originally posted by **TommyDESY** May 4, 2023</sup>
Hi all,
I'm currently setting up a fit with 5 templates/samples. I included as a PoI a `normfactor` (Unconstrained Normalisation) which didn't cause any particular issue. I managed to create my pyhf model and managed to plot pulls, errors and fit with Asimov data.
However, what I actually want to do is use a `shapefactor` (Data-driven Shape). I use exactly the same code as previously but here an error arises. After building my pyhf spec I add the `shapefactor` as a modifier:
`sample['modifiers'].append({'name': 'mu', 'type': 'shapefactor', 'data': None})`
When I build my model with `model = pyhf.Model(pyhf_spec)`, I get an AssertionError from pyhf/pdf.py l. 468:
` 467 s = self.par_slice(name)`
`--> 468 assert s.stop - s.start == 1`
With a `normfactor`, this slice is indeed 1, but with `shapefactor` this slice has the same size as the number of bins in the fit (17 bins in lepton energy in my specific case).
I tried removing this line from pdf.py, but that caused other errors when running the MLE fit. Therefore, it seems that this requirement is important.
What am I missing here? Is there anything specific that needs to be added to the pyhf spec when considering data-driven shapes? Or is this due to some known issue?
Thank you for your help.</div>
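A minimal sketch reproducing the behaviour described above (an assumed two-bin spec, not the reporter's actual model): a `shapefactor` introduces one parameter per bin, so the resulting parameter is not a single scalar and cannot currently serve as the parameter of interest.

```python
import pyhf

spec = {
    "channels": [
        {
            "name": "SR",
            "samples": [
                {
                    "name": "Signal",
                    "data": [5.0, 10.0],
                    "modifiers": [
                        {"name": "mu", "type": "shapefactor", "data": None}
                    ],
                }
            ],
        }
    ]
}

# Fails at model construction: an AssertionError before the patch above,
# a pyhf.exceptions.InvalidModel with it.
model = pyhf.Model(spec, poi_name="mu")
```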
| 2023-05-11T15:12:00 |
|
scikit-hep/pyhf | 2,208 | scikit-hep__pyhf-2208 | [
"2207"
] | 22c1699f044f798f2d0fe904d336f3ce9ee50b92 | diff --git a/src/pyhf/tensor/numpy_backend.py b/src/pyhf/tensor/numpy_backend.py
--- a/src/pyhf/tensor/numpy_backend.py
+++ b/src/pyhf/tensor/numpy_backend.py
@@ -2,10 +2,16 @@
from __future__ import annotations
import logging
-from typing import Callable, Generic, Mapping, Sequence, TypeVar, Union
+from typing import TYPE_CHECKING, Callable, Generic, Mapping, Sequence, TypeVar, Union
import numpy as np
-from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
+
+# Needed while numpy lower bound is older than v1.21.0
+if TYPE_CHECKING:
+ from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
+else:
+ NBitBase = "NBitBase"
+
from scipy import special
from scipy.special import gammaln, xlogy
from scipy.stats import norm, poisson
| Add stricter version guards for NumPy
### Summary
@kskovpen noted on the IRIS-HEP Slack that they were having issues with older NumPy versions and `pyhf` `v0.7.1`:
```pytb
File "<stdin>", line 1, in <module>
File "/user/kskovpen/.local/lib/python3.8/site-packages/pyhf/__init__.py", line 3, in <module>
from pyhf.tensor.manager import get_backend
File "/user/kskovpen/.local/lib/python3.8/site-packages/pyhf/tensor/manager.py", line 49, in <module>
_default_backend: TensorBackend = BackendRetriever.numpy_backend()
File "/user/kskovpen/.local/lib/python3.8/site-packages/pyhf/tensor/__init__.py", line 20, in __getattr__
from pyhf.tensor.numpy_backend import numpy_backend
File "/user/kskovpen/.local/lib/python3.8/site-packages/pyhf/tensor/numpy_backend.py", line 8, in <module>
from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
ModuleNotFoundError: No module named 'numpy.typing'
```
@agoose77 correctly noted that `numpy.typing` wasn't introduced until `numpy` `v1.21.0` which is the version that `pyhf` tests against for our lower bounds
https://github.com/scikit-hep/pyhf/blob/22c1699f044f798f2d0fe904d336f3ce9ee50b92/tests/constraints.txt#L9
but that we don't explicitly enforce a lower bound as we defer to `scipy` to do that.
https://github.com/scikit-hep/pyhf/blob/22c1699f044f798f2d0fe904d336f3ce9ee50b92/pyproject.toml#L51-L53
Though the lowest constraints possible on `numpy` come from the lower bound on `scipy` which for `scipy` `v1.5.1` is `numpy>=1.14.5`:
```
$ docker run --rm -ti python:3.8 /bin/bash
root@2cc06d3a1b63:/# python -m venv venv && . venv/bin/activate
(venv) root@2cc06d3a1b63:/# python -m pip --quiet install --upgrade pip setuptools wheel
(venv) root@2cc06d3a1b63:/# python -m pip install 'scipy==1.5.1'
Collecting scipy==1.5.1
Downloading scipy-1.5.1-cp38-cp38-manylinux1_x86_64.whl (25.8 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 25.8/25.8 MB 10.9 MB/s eta 0:00:00
Collecting numpy>=1.14.5 (from scipy==1.5.1)
Downloading numpy-1.24.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (17.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 17.3/17.3 MB 11.2 MB/s eta 0:00:00
Installing collected packages: numpy, scipy
Successfully installed numpy-1.24.3 scipy-1.5.1
(venv) root@2cc06d3a1b63:/#
```
though the oldest Python 3.8 `numpy` with wheels is `v1.17.3` and indeed this problem becomes reproducible then
```
...
(venv) root@2cc06d3a1b63:/# python -m pip --quiet install --upgrade 'numpy==1.17.3' 'scipy==1.5.1'
(venv) root@2cc06d3a1b63:/# python -c 'import pyhf'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/venv/lib/python3.8/site-packages/pyhf/__init__.py", line 3, in <module>
from pyhf.tensor.manager import get_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/manager.py", line 49, in <module>
_default_backend: TensorBackend = BackendRetriever.numpy_backend()
File "/venv/lib/python3.8/site-packages/pyhf/tensor/__init__.py", line 20, in __getattr__
from pyhf.tensor.numpy_backend import numpy_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/numpy_backend.py", line 8, in <module>
from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
ModuleNotFoundError: No module named 'numpy.typing'
(venv) root@2cc06d3a1b63:/#
```
and stays that way until `numpy` `v1.21.0` (which again is where `numpy.typing` got added) is used
```
...
(venv) root@2cc06d3a1b63:/# python -m pip --quiet install --upgrade 'numpy<=1.21.0' 'scipy==1.5.1'
(venv) root@2cc06d3a1b63:/# python -c 'import pyhf'
(venv) root@2cc06d3a1b63:/#
```
So this
https://github.com/scikit-hep/pyhf/blob/22c1699f044f798f2d0fe904d336f3ce9ee50b92/src/pyhf/tensor/numpy_backend.py#L8
needs to be guarded more carefully.
@agoose77 has suggested
> that import should probably be behind a `if TYPE_CHECKING` guard, or bump the minimum NumPy version.
### OS / Environment
```console
# cat /etc/os-release
PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"
NAME="Debian GNU/Linux"
VERSION_ID="11"
VERSION="11 (bullseye)"
VERSION_CODENAME=bullseye
ID=debian
HOME_URL="https://www.debian.org/"
SUPPORT_URL="https://www.debian.org/support"
BUG_REPORT_URL="https://bugs.debian.org/"
```
### Steps to Reproduce
<!--- ...or if you have a failing CLI command paste it between the quotes below -->
```console (paste below)
$ docker run --rm -ti python:3.8 /bin/bash
root@2cc06d3a1b63:/# python -m venv venv && . venv/bin/activate
(venv) root@2cc06d3a1b63:/# python -m pip --quiet install --upgrade pip setuptools wheel
(venv) root@2cc06d3a1b63:/# python -m pip --quiet install --upgrade 'numpy==1.17.3' 'scipy==1.5.1'
(venv) root@2cc06d3a1b63:/# python -c 'import pyhf'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/venv/lib/python3.8/site-packages/pyhf/__init__.py", line 3, in <module>
from pyhf.tensor.manager import get_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/manager.py", line 49, in <module>
_default_backend: TensorBackend = BackendRetriever.numpy_backend()
File "/venv/lib/python3.8/site-packages/pyhf/tensor/__init__.py", line 20, in __getattr__
from pyhf.tensor.numpy_backend import numpy_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/numpy_backend.py", line 8, in <module>
from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
ModuleNotFoundError: No module named 'numpy.typing'
(venv) root@2cc06d3a1b63:/#
```
### File Upload (optional)
_No response_
### Expected Results
For `pyhf` to properly enforce lower bounds.
### Actual Results
`pyhf` allows for installation of `numpy` versions before `numpy.typing` was introduced.
### pyhf Version
```console
pyhf, version 0.7.1
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| > @agoose77 has suggested
>
> > that import should probably be behind a `if TYPE_CHECKING` guard, or bump the minimum NumPy version.
I think it is going to have to be adding a lower bound on NumPy, as even if the lines
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
```
are added to `pyhf/tensor/numpy_backend.py` then things will still fail with
```pytb
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/venv/lib/python3.8/site-packages/pyhf/__init__.py", line 3, in <module>
from pyhf.tensor.manager import get_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/manager.py", line 49, in <module>
_default_backend: TensorBackend = BackendRetriever.numpy_backend()
File "/venv/lib/python3.8/site-packages/pyhf/tensor/__init__.py", line 20, in __getattr__
from pyhf.tensor.numpy_backend import numpy_backend
File "/venv/lib/python3.8/site-packages/pyhf/tensor/numpy_backend.py", line 17, in <module>
T = TypeVar("T", bound=NBitBase)
NameError: name 'NBitBase' is not defined
```
given
https://github.com/scikit-hep/pyhf/blob/22c1699f044f798f2d0fe904d336f3ce9ee50b92/src/pyhf/tensor/numpy_backend.py#L15
This isn't great (but also not terrible) in the grand scheme of giving control to `scipy` as [`scipy` `v1.5.1`](https://pypi.org/project/scipy/1.5.1/) was released 2020-07-04 (happy [Higgspendence day](https://twitter.com/search?q=Higgspendence%20day&src=typed_query&f=live) :tada:) and [`numpy` `v1.21.0`](https://pypi.org/project/numpy/1.21.0/) was released on 2021-06-22. So at this point in 2023-05 decently old, but at the same time a year after the oldest supported SciPy version. This gets worse if we were to additionally backport this as a fix to a `v0.7.x` release as
https://github.com/scikit-hep/pyhf/blob/6e5feefcd802863b1dbf5517380f11afcafd3ab5/pyproject.toml#L52-L55
and `scipy` `v1.2.0` was released in 2018.
You can make your TypeVar use strings (`bound="NBitBase"`) to fix this particular problem!
> You can make your TypeVar use strings (`bound="NBitBase"`) to fix this particular problem!
Ah, yes, right again! :smile: I really need to get better at typing :sweat_smile: | 2023-05-17T06:43:27 |
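Condensing the two suggestions above (this mirrors what the patch at the top of this entry does):

```python
from typing import TYPE_CHECKING, TypeVar

# Import the numpy.typing names only while type checking; at runtime fall back
# to a string so the TypeVar bound is a lazily resolved forward reference and
# older NumPy releases without numpy.typing still import cleanly.
if TYPE_CHECKING:
    from numpy.typing import ArrayLike, DTypeLike, NBitBase, NDArray
else:
    NBitBase = "NBitBase"

T = TypeVar("T", bound=NBitBase)
```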
|
scikit-hep/pyhf | 2,220 | scikit-hep__pyhf-2220 | [
"2191"
] | 2219abf4f2f3e453d23c38bdbdceb42b6aef87b5 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,6 +55,7 @@ def setup(app):
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
+ 'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'sphinx.ext.napoleon',
'sphinx_click.ext',
| Menu on mobile page not accessible for pyhf v0.7.1 docs
### Summary
On the [`pyhf` `v0.7.1` docs](https://pyhf.readthedocs.io/en/v0.7.1/) and on the `main` docs build, the drop-down menu (circled in the screenshot below) fails to open when clicked.

Things work fine on desktop and, confusingly, @alexander-held has pointed out that the [`v0.5.2` `cabinetry` docs](https://cabinetry.readthedocs.io/en/stable/) (which were [released](https://github.com/scikit-hep/cabinetry/releases/tag/v0.5.2) very close in time to the `pyhf` `v0.7.1` docs) have a menu that works fine on mobile.
### Documentation Page Link
https://pyhf.readthedocs.io/en/v0.7.1/
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2023-05-22T23:16:43 |
||
scikit-hep/pyhf | 2,242 | scikit-hep__pyhf-2242 | [
"2241"
] | 951912f12a9624b5bfb7a3dd46c68fd78ef9b61b | diff --git a/src/pyhf/tensor/numpy_backend.py b/src/pyhf/tensor/numpy_backend.py
--- a/src/pyhf/tensor/numpy_backend.py
+++ b/src/pyhf/tensor/numpy_backend.py
@@ -254,7 +254,7 @@ def sum(self, tensor_in: Tensor[T], axis: int | None = None) -> ArrayLike:
return np.sum(tensor_in, axis=axis)
def product(self, tensor_in: Tensor[T], axis: Shape | None = None) -> ArrayLike:
- return np.product(tensor_in, axis=axis) # type: ignore[arg-type]
+ return np.prod(tensor_in, axis=axis) # type: ignore[arg-type]
def abs(self, tensor: Tensor[T]) -> ArrayLike:
return np.abs(tensor)
| diff --git a/docs/examples/notebooks/pytorch_tests_onoff.ipynb b/docs/examples/notebooks/pytorch_tests_onoff.ipynb
--- a/docs/examples/notebooks/pytorch_tests_onoff.ipynb
+++ b/docs/examples/notebooks/pytorch_tests_onoff.ipynb
@@ -35,7 +35,7 @@
" self.auxdata.append(bkg_over_bsq)\n",
"\n",
" def alphas(self, pars):\n",
- " return np.product([pars, self.bkg_over_db_squared], axis=0)\n",
+ " return np.prod([pars, self.bkg_over_db_squared], axis=0)\n",
"\n",
" def logpdf(self, a, alpha):\n",
" return _log_poisson_impl(a, alpha)\n",
| Upcoming deprecation of np.product
### Summary
Both `paramview` and `pdf` cause deprecation warnings due to the use of `np.product` instead of `np.prod` when using `numpy` 1.25 or above. This is not flagged in CI, which uses an older version of `numpy`, presumably due to other dependencies capping it still.
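The replacement is mechanical, since `np.product` has only ever been an alias for `np.prod` (plain NumPy check, not pyhf internals):

```python
import numpy as np

# np.product was only an alias for np.prod, so the results are identical.
assert list(np.prod([[2.0, 3.0], [4.0, 5.0]], axis=0)) == [8.0, 15.0]
```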
### OS / Environment
```console
n/a
```
### Steps to Reproduce
run e.g. `pytest tests/test_pdf.py` in an environment with `numpy` 1.25
### File Upload (optional)
_No response_
### Expected Results
no deprecation warnings
### Actual Results
```console
[...]/pyhf/src/pyhf/parameters/paramview.py:65: DeprecationWarning: `product` is deprecated as of NumPy 1.25.0, and will be removed in NumPy 2.0. Please use `prod` instead.
fullsize = default_backend.product(default_backend.astensor(shape))
[...]/pyhf/src/pyhf/pdf.py:709: DeprecationWarning: `product` is deprecated as of NumPy 1.25.0, and will be removed in NumPy 2.0. Please use `prod` instead.
newbysample = tensorlib.product(allfac, axis=0)
```
### pyhf Version
```console
HEAD
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2023-07-04T15:20:18 |
|
scikit-hep/pyhf | 2,278 | scikit-hep__pyhf-2278 | [
"1785"
] | b81d903c888d98183da63e6cc3ada70668b349ef | diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -28,7 +28,10 @@ def __init__(self, *args, **kwargs):
Args:
errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
- strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+ Default is ``None``, which results in either
+ :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+ ``int(not pyhf.tensorlib.default_do_grad)``.
tolerance (:obj:`float`): Tolerance for termination.
See specific optimizer for detailed meaning.
Default is ``0.1``.
@@ -99,11 +102,14 @@ def _minimize(
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
- # 0: Fast, user-provided gradient
- # 1: Default, no user-provided gradient
- strategy = options.pop(
- 'strategy', self.strategy if self.strategy is not None else not do_grad
- )
+ # do_grad value results in iminuit.Minuit.strategy of either:
+ # 0: Fast. Does not check a user-provided gradient.
+ # 1: Default. Checks user-provided gradient against numerical gradient.
+ strategy = options.pop("strategy", self.strategy)
+ # Guard against None from either self.strategy defaulting to None or
+ # passing strategy=None as options kwarg
+ if strategy is None:
+ strategy = 0 if do_grad else 1
tolerance = options.pop('tolerance', self.tolerance)
if options:
raise exceptions.Unsupported(
| diff --git a/tests/test_optim.py b/tests/test_optim.py
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -173,27 +173,35 @@ def test_minuit_strategy_do_grad(mocker, backend):
assert spy.spy_return.minuit.strategy == 1
[email protected]('strategy', [0, 1])
[email protected]('strategy', [0, 1, 2])
def test_minuit_strategy_global(mocker, backend, strategy):
pyhf.set_backend(
pyhf.tensorlib, pyhf.optimize.minuit_optimizer(strategy=strategy, tolerance=0.2)
)
spy = mocker.spy(pyhf.optimize.minuit_optimizer, '_minimize')
- m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
- data = pyhf.tensorlib.astensor([125.0] + m.config.auxdata)
+ model = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
+ data = pyhf.tensorlib.astensor([125.0] + model.config.auxdata)
- pyhf.infer.mle.fit(data, m)
+ pyhf.infer.mle.fit(data, model)
assert spy.call_count == 1
assert spy.spy_return.minuit.strategy == strategy
- pyhf.infer.mle.fit(data, m, strategy=0)
+ pyhf.infer.mle.fit(data, model, strategy=None)
assert spy.call_count == 2
- assert spy.spy_return.minuit.strategy == 0
+ assert spy.spy_return.minuit.strategy == int(not pyhf.tensorlib.default_do_grad)
- pyhf.infer.mle.fit(data, m, strategy=1)
+ pyhf.infer.mle.fit(data, model, strategy=0)
assert spy.call_count == 3
+ assert spy.spy_return.minuit.strategy == 0
+
+ pyhf.infer.mle.fit(data, model, strategy=1)
+ assert spy.call_count == 4
assert spy.spy_return.minuit.strategy == 1
+ pyhf.infer.mle.fit(data, model, strategy=2)
+ assert spy.call_count == 5
+ assert spy.spy_return.minuit.strategy == 2
+
def test_set_tolerance(backend):
m = pyhf.simplemodels.uncorrelated_background([50.0], [100.0], [10.0])
| Documentation of default Minuit strategy choice
### Summary
The documentation of `minuit_optimizer` lists `strategy` as a possible kwarg with default value of `None`, but it does not explain what that choice leads to. `pyhf` uses `0`/`1` depending on whether user-provided gradients are used (#1172 / #1183), and I believe it would be good to have that documented on the optimizer page. I'd be happy to submit a PR.
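As an illustration of that default (exposition only, the helper name below is made up), the selection reduces to:

```python
# With strategy=None the effective iminuit strategy follows the gradient mode:
# backends that provide analytic gradients get the fast strategy 0, otherwise 1.
def effective_strategy(do_grad, strategy=None):
    return strategy if strategy is not None else int(not do_grad)


assert effective_strategy(do_grad=True) == 0   # autodiff backends
assert effective_strategy(do_grad=False) == 1  # e.g. the numpy backend
assert effective_strategy(do_grad=False, strategy=2) == 2  # explicit override
```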
### Documentation Page Link
https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.optimize.opt_minuit.minuit_optimizer.html#pyhf.optimize.opt_minuit.minuit_optimizer
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| Maybe I misunderstood the way `strategy` is supposed to be used, but it seems like `None` is actually not a supported setting:
```python
import pyhf
model = pyhf.simplemodels.uncorrelated_background(
signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
data = [51, 48] + model.config.auxdata
pyhf.set_backend("numpy", "minuit")
pyhf.infer.mle.fit(data, model, strategy=None)
```
will fail:
```pytb
Traceback (most recent call last):
File "[...]/test.py", line 8, in <module>
pyhf.infer.mle.fit(data, model, strategy=None)
File "[...]/pyhf/src/pyhf/infer/mle.py", line 131, in fit
return opt.minimize(
File "[...]/pyhf/src/pyhf/optimize/mixins.py", line 185, in minimize
result = self._internal_minimize(
File "[...]/pyhf/src/pyhf/optimize/mixins.py", line 51, in _internal_minimize
result = self._minimize(
File "[...]/pyhf/src/pyhf/optimize/opt_minuit.py", line 117, in _minimize
minimizer.strategy = strategy
File "[...]/iminuit/minuit.py", line 203, in strategy
self._strategy.strategy = value
TypeError: (): incompatible function arguments. The following argument types are supported:
1. (arg0: iminuit._core.MnStrategy, arg1: int) -> None
Invoked with: <iminuit._core.MnStrategy object at 0x1420a0070>, None
```
`iminuit` only supports values of 0/1/2.
Is the intention from the `pyhf` side to support `None`, or to only support the values of 0/1/2? It is not completely clear to me which of the kwargs on that page is the relevant one (related to #1786), i.e. `__init__` vs `_minimize`. | 2023-08-15T23:18:45 |
scikit-hep/pyhf | 2,300 | scikit-hep__pyhf-2300 | [
"2299"
] | b6874878c58093f8c1fecc06d2f631fa82e6e064 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -529,4 +529,5 @@ def setup(app):
linkcheck_retries = 50
# JupyterLite configuration
-jupyterlite_dir = "lite"
+# Use Path as jupyterlite-sphinx expects PosixPath
+jupyterlite_dir = Path("lite")
| Sphinx warning in docs for config value `jupyterlite_dir` has type `str`, defaults to `PosixPath`
Following Issue #2297, the test build of the docs is failing with
```
WARNING: The config value `jupyterlite_dir' has type `str', defaults to `PosixPath'.
```
This warning is treated as an error as we do this intentionally
https://github.com/scikit-hep/pyhf/blob/b6874878c58093f8c1fecc06d2f631fa82e6e064/docs/Makefile#L5-L8
I'm not sure if this means that we need to update
https://github.com/scikit-hep/pyhf/blob/b6874878c58093f8c1fecc06d2f631fa82e6e064/docs/conf.py#L531-L532
to be a Pathlib Path or not. I'm not sure how that would work though.
| 2023-08-17T21:08:07 |
||
scikit-hep/pyhf | 2,310 | scikit-hep__pyhf-2310 | [
"2311"
] | 495401077ed83490b5518f239821eb34919fba85 | diff --git a/src/pyhf/events.py b/src/pyhf/events.py
--- a/src/pyhf/events.py
+++ b/src/pyhf/events.py
@@ -64,12 +64,20 @@ def _flush(self):
self._callbacks = _callbacks
def __call__(self, *args, **kwargs):
- for func, arg in self.callbacks:
+ for func, arg in self._callbacks:
# weakref: needs to be de-ref'd first before calling
if arg is not None:
- func()(arg(), *args, **kwargs)
+ arg_ref = arg()
+ if arg_ref is not None:
+ func()(arg_ref, *args, **kwargs)
else:
func()(*args, **kwargs)
+ # Flush after calling all the callbacks, not before, as callbacks in the
+ # beginning of the iteration might cause new dead arg weakrefs in
+ # callbacks that are iterated over later.
+ # Checking for dead weakrefs in each iteration and flushing at the end
+ # avoids redundant dead weakref checking in subsequent calls.
+ self._flush()
def __iter__(self):
return iter(self.callbacks)
| weakrefs to dead objects occurring when changing backends in pyhf benchmark
### Summary
I am trying to benchmark `pyhf` using `pytest-benchmark`, quite similar to the benchmark in the `tests/benchmarks` directory.
A short example of such a simple benchmark is given below. To reproduce this bug, the python code needs to be saved in a file of the format `test_<name>.py` and executed via `pytest test_<name>.py`.
The bug occurs only sometimes when the backend is changed between different benchmarking cases. Since the occurrence of the bug involves some randomness, it may not show up on the first try, so the benchmark may need to be executed multiple times. The full benchmark takes around 1 min on my machine.
The suspected origin of this bug is that every time the backend is changed, an event called `tensorlib_changed` is triggered, which in turn leads to the execution of some `_precompute()` functions on different objects (e.g. a `TensorViewer` object as in the error message). The problem occurs when the referenced object no longer exists because all strong references to it have been removed; the weakref used to call the function does not keep the object alive.
A proposed solution can be found in PR #2310.
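A toy illustration of this failure mode (plain `weakref`, not pyhf code):

```python
import weakref


class Subject:
    def __init__(self):
        self._sorted_indices = [0, 1]

    def _precompute(self):
        return self._sorted_indices


subject = Subject()
ref = weakref.ref(subject)
del subject  # all strong references gone; the weakref now dereferences to None

arg = ref()
# Calling Subject._precompute(arg) with arg=None would raise
# AttributeError: 'NoneType' object has no attribute '_sorted_indices',
# which is the guard the proposed fix adds before invoking each callback.
if arg is not None:
    Subject._precompute(arg)
```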
### OS / Environment
```console
PRETTY_NAME="Ubuntu 22.04.3 LTS"
NAME="Ubuntu"
VERSION_ID="22.04"
VERSION="22.04.3 LTS (Jammy Jellyfish)"
VERSION_CODENAME=jammy
ID=ubuntu
ID_LIKE=debian
HOME_URL="https://www.ubuntu.com/"
SUPPORT_URL="https://help.ubuntu.com/"
BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
UBUNTU_CODENAME=jammy
```
### Steps to Reproduce
<!--- Paste your minimal failing Python example code between the quotes below -->
```python (paste below)
# content of test_benchmark.py
import pytest
import pyhf
@pytest.fixture(
scope='function',
params=[
(pyhf.tensor.numpy_backend(), None),
(pyhf.tensor.pytorch_backend(), None),
(pyhf.tensor.pytorch_backend(precision='64b'), None),
(pyhf.tensor.tensorflow_backend(), None),
(pyhf.tensor.jax_backend(), None),
(
pyhf.tensor.numpy_backend(poisson_from_normal=True),
pyhf.optimize.minuit_optimizer(),
),
],
ids=['numpy', 'pytorch', 'pytorch64',
'tensorflow',
'jax', 'numpy_minuit'],
)
def backend(request):
# get the ids of all the backends
param_ids = request._fixturedef.ids
# the backend we're using: numpy, tensorflow, etc...
param_id = param_ids[request.param_index]
# name of function being called (with params), the original name is .originalname
func_name = request._pyfuncitem.name
pyhf.set_backend(*request.param)
yield request.param
def hypotest(data, pdf):
return pyhf.infer.hypotest(1.0, data, pdf, test_stat="qtilde", return_expected=True)
bins = [1, 2, 4, 8, 16, 32]
bin_ids = [f'{n_bins}_bins' for n_bins in bins]
@pytest.mark.parametrize('n_bins', bins, ids=bin_ids)
def test_hypotest(benchmark, backend, n_bins):
model = pyhf.simplemodels.uncorrelated_background(signal=[12.0]*n_bins, bkg=[50.0]*n_bins, bkg_uncertainty=[5.0]*n_bins)
data = [51.0]*n_bins + model.config.auxdata
assert benchmark(hypotest, data, model)
```
<!--- ...or if you have a failing CLI command paste it between the quotes below -->
```console (paste below)
pytest test_benchmark.py
```
### File Upload (optional)
_No response_
### Expected Results
The expected behavior is to output the benchmarking results for all considered cases, as can be observed when executing `pytest` in `pyhf/tests/benchmarks/`.
This output should not show any "test failures", since no ordinary tests are performed, only functions that run without error when called outside of the benchmark.
### Actual Results
```console
_________________________ ERROR at setup of test_hypotest[jax-1_bins] _________________________
request = <SubRequest 'backend' for <Function test_hypotest[jax-1_bins]>>
@pytest.fixture(
scope='function',
params=[
(pyhf.tensor.numpy_backend(), None),
(pyhf.tensor.pytorch_backend(), None),
(pyhf.tensor.pytorch_backend(precision='64b'), None),
(pyhf.tensor.tensorflow_backend(), None),
(pyhf.tensor.jax_backend(), None),
(
pyhf.tensor.numpy_backend(poisson_from_normal=True),
pyhf.optimize.minuit_optimizer(),
),
],
ids=['numpy', 'pytorch', 'pytorch64',
'tensorflow',
'jax', 'numpy_minuit'],
)
def backend(request):
# get the ids of all the backends
param_ids = request._fixturedef.ids
# the backend we're using: numpy, tensorflow, etc...
param_id = param_ids[request.param_index]
# name of function being called (with params), the original name is .originalname
func_name = request._pyfuncitem.name
> pyhf.set_backend(*request.param)
test_hypo_pyhf.py:29:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../pyhfDev/pyhf/src/pyhf/events.py:161: in register_wrapper
result = func(*args, **kwargs)
../../pyhfDev/pyhf/src/pyhf/tensor/manager.py:193: in set_backend
events.trigger("tensorlib_changed")()
../../pyhfDev/pyhf/src/pyhf/events.py:70: in __call__
func()(arg(), *args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = None
def _precompute(self):
tensorlib, _ = get_backend()
> self.sorted_indices = tensorlib.astensor(self._sorted_indices, dtype='int')
E AttributeError: 'NoneType' object has no attribute '_sorted_indices'
../../pyhfDev/pyhf/src/pyhf/tensor/common.py:33: AttributeError
```
### pyhf Version
```console
pyhf, version 0.7.1.dev116
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2023-08-31T09:34:50 |
||
scikit-hep/pyhf | 2,328 | scikit-hep__pyhf-2328 | [
"2327"
] | 10b17dd404ca2a0d7b109caaa806c72725ff61c2 | diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -768,7 +768,9 @@ def __init__(
log.info(f"Validating spec against schema: {self.schema:s}")
schema.validate(self.spec, self.schema, version=self.version)
# build up our representation of the specification
- poi_name = config_kwargs.pop('poi_name', 'mu')
+ # Default to no POI name
+ # https://github.com/scikit-hep/pyhf/issues/2327
+ poi_name = config_kwargs.pop("poi_name", None)
self._config = _ModelConfig(self.spec, **config_kwargs)
modifiers, _nominal_rates = _nominal_and_modifiers_from_spec(
diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py
--- a/src/pyhf/simplemodels.py
+++ b/src/pyhf/simplemodels.py
@@ -10,7 +10,7 @@ def __dir__():
def correlated_background(
- signal, bkg, bkg_up, bkg_down, batch_size=None, validate=True
+ signal, bkg, bkg_up, bkg_down, batch_size=None, validate=True, poi_name="mu"
):
r"""
Construct a simple single channel :class:`~pyhf.pdf.Model` with a
@@ -27,10 +27,14 @@ def correlated_background(
batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.
validate (:obj:`bool`): If :obj:`True`, validate the model before returning.
Only set this to :obj:`False` if you have an experimental use case and know what you're doing.
+ poi_name (:obj:`str`): The :class:`~pyhf.pdf.Model` parameter of interest name.
+ Defaults to ``"mu"``.
Returns:
~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.
+ .. versionchanged:: 0.8.0 Added ``poi_name`` argument.
+
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
@@ -79,11 +83,11 @@ def correlated_background(
}
]
}
- return Model(spec, batch_size=batch_size, validate=validate)
+ return Model(spec, batch_size=batch_size, validate=validate, poi_name=poi_name)
def uncorrelated_background(
- signal, bkg, bkg_uncertainty, batch_size=None, validate=True
+ signal, bkg, bkg_uncertainty, batch_size=None, validate=True, poi_name="mu"
):
"""
Construct a simple single channel :class:`~pyhf.pdf.Model` with a
@@ -114,10 +118,13 @@ def uncorrelated_background(
batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute
validate (:obj:`bool`): If :obj:`True`, validate the model before returning.
Only set this to :obj:`False` if you have an experimental use case and know what you're doing.
+ poi_name (:obj:`str`): The :class:`~pyhf.pdf.Model` parameter of interest name.
+ Defaults to ``"mu"``.
Returns:
~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema
+ .. versionchanged:: 0.8.0 Added ``poi_name`` argument.
"""
spec = {
'channels': [
@@ -146,7 +153,7 @@ def uncorrelated_background(
}
]
}
- return Model(spec, batch_size=batch_size, validate=validate)
+ return Model(spec, batch_size=batch_size, validate=validate, poi_name=poi_name)
# Deprecated APIs
| diff --git a/tests/test_backend_consistency.py b/tests/test_backend_consistency.py
--- a/tests/test_backend_consistency.py
+++ b/tests/test_backend_consistency.py
@@ -99,7 +99,7 @@ def test_hypotest_qmu_tilde(
else [signal_sample, background_sample]
)
spec = {'channels': [{'name': 'singlechannel', 'samples': samples}]}
- pdf = pyhf.Model(spec)
+ pdf = pyhf.Model(spec, poi_name="mu")
data = source['bindata']['data'] + pdf.config.auxdata
diff --git a/tests/test_optim.py b/tests/test_optim.py
--- a/tests/test_optim.py
+++ b/tests/test_optim.py
@@ -312,7 +312,7 @@ def spec(source):
@pytest.mark.parametrize('mu', [1.0], ids=['mu=1'])
def test_optim(backend, source, spec, mu):
- pdf = pyhf.Model(spec)
+ pdf = pyhf.Model(spec, poi_name="mu")
data = source['bindata']['data'] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
@@ -336,7 +336,7 @@ def test_optim(backend, source, spec, mu):
@pytest.mark.parametrize('mu', [1.0], ids=['mu=1'])
def test_optim_with_value(backend, source, spec, mu):
- pdf = pyhf.Model(spec)
+ pdf = pyhf.Model(spec, poi_name="mu")
data = source['bindata']['data'] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
@@ -364,7 +364,7 @@ def test_optim_with_value(backend, source, spec, mu):
@pytest.mark.parametrize('mu', [1.0], ids=['mu=1'])
@pytest.mark.only_numpy_minuit
def test_optim_uncerts(backend, source, spec, mu):
- pdf = pyhf.Model(spec)
+ pdf = pyhf.Model(spec, poi_name="mu")
data = source['bindata']['data'] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
@@ -391,7 +391,7 @@ def test_optim_uncerts(backend, source, spec, mu):
@pytest.mark.parametrize('mu', [1.0], ids=['mu=1'])
@pytest.mark.only_numpy_minuit
def test_optim_correlations(backend, source, spec, mu):
- pdf = pyhf.Model(spec)
+ pdf = pyhf.Model(spec, poi_name="mu")
data = source['bindata']['data'] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
diff --git a/tests/test_validation.py b/tests/test_validation.py
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -788,7 +788,9 @@ def test_validation(setup):
source = setup['source']
pdf = pyhf.Model(
- setup['spec'], modifier_settings={'normsys': {'interpcode': 'code1'}}
+ setup["spec"],
+ modifier_settings={"normsys": {"interpcode": "code1"}},
+ poi_name="mu",
)
if 'channels' in source:
| Default POI name for model construction
The current default for the POI name in model construction is `"mu"`. The safer choice here would be to go with `""`, i.e. no POI specified. This reduces the assumptions being made and is not going to cause failures for models where no "mu" parameter is defined. I would suggest updating the default accordingly.
https://github.com/scikit-hep/pyhf/blob/f771581ae387493776f2c40170f82b63d0fad317/src/pyhf/pdf.py#L771
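For illustration, a minimal sketch of passing the POI name explicitly rather than relying on the default (placeholder rates; assumes the `poi_name` keyword exposed by the patch above):
```python
import pyhf

# With "" as the default (no POI), callers who want the old behaviour opt in explicitly.
model = pyhf.simplemodels.uncorrelated_background(
    signal=[5.0], bkg=[50.0], bkg_uncertainty=[7.0], poi_name="mu"
)
```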
| 2023-09-13T21:57:01 |
|
scikit-hep/pyhf | 2,339 | scikit-hep__pyhf-2339 | [
"1652"
] | c096b33cf859246771e06099ee1b48063ca051cd | diff --git a/src/pyhf/pdf.py b/src/pyhf/pdf.py
--- a/src/pyhf/pdf.py
+++ b/src/pyhf/pdf.py
@@ -650,7 +650,21 @@ def logpdf(self, maindata, pars):
"""
return self.make_pdf(pars).log_prob(maindata)
- def _modifications(self, pars):
+ def modifications(self, pars):
+ """
+ Obtain the additive and multiplicative modifications to the expected
+ event rates for the model parameters.
+
+ Args:
+ pars (:obj:`tensor`): The model parameters
+
+ Returns:
+ Tuple of additive and multiplicative modifications:
+ - deltas (:obj:`list`) is the result of an ``apply(pars)`` of combined modifiers
+ with ``"addition"`` ``op_code``
+ - factors (:obj:`list`) is the result of ``apply(pars)`` of combined modifiers
+ with ``"multiplication"`` ``op_code``
+ """
deltas = list(
filter(
lambda x: x is not None,
@@ -695,7 +709,7 @@ def expected_data(self, pars, return_by_sample=False):
"""
tensorlib, _ = get_backend()
pars = tensorlib.astensor(pars)
- deltas, factors = self._modifications(pars)
+ deltas, factors = self.modifications(pars)
allsum = tensorlib.concatenate(deltas + [self.nominal_rates])
@@ -826,8 +840,12 @@ def expected_auxdata(self, pars):
pars = tensorlib.astensor(pars)
return self.make_pdf(pars)[1].expected_data()
- def _modifications(self, pars):
- return self.main_model._modifications(pars)
+ def modifications(self, pars):
+ """
+ The modifications applied to the :class:`~pyhf.pdf._MainModel`. See
+ :func:`pyhf.pdf._MainModel.modifications` for details.
+ """
+ return self.main_model.modifications(pars)
@property
def nominal_rates(self):
| diff --git a/tests/test_modifiers.py b/tests/test_modifiers.py
--- a/tests/test_modifiers.py
+++ b/tests/test_modifiers.py
@@ -77,7 +77,7 @@ def test_staterror_holes():
model = pyhf.Model(spec, poi_name="")
assert model.config.npars == 9
- _, factors = model._modifications(
+ _, factors = model.modifications(
pyhf.tensorlib.astensor([2, 2.0, 1.0, 1.0, 3.0, 4.0, 1.0, 5.0, 6.0])
)
assert model.config.param_set("staterror_1").suggested_fixed == [
@@ -151,7 +151,7 @@ def test_shapesys_holes():
}
model = pyhf.Model(spec, poi_name="mu")
- _, factors = model._modifications(
+ _, factors = model.modifications(
pyhf.tensorlib.astensor([1.0, 2.0, 1.0, 1.0, 3.0, 4.0, 1.0, 1.0, 5.0])
)
assert (factors[1][0, 0, 0, :] == [2.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0]).all()
diff --git a/tests/test_pdf.py b/tests/test_pdf.py
--- a/tests/test_pdf.py
+++ b/tests/test_pdf.py
@@ -754,7 +754,7 @@ def test_lumi_np_scaling():
[[[1.0, 1.0]]],
[[[1.0, 1.0]]],
]
- assert pdf._modifications(np.array(pars))[1][0].tolist() == [mods]
+ assert pdf.modifications(np.array(pars))[1][0].tolist() == [mods]
assert pdf.expected_data(pars).tolist() == [120.0, 110.0, 1.0]
pars[poi_slice], pars[lumi_slice] = [[1.0], [alpha_lumi]]
@@ -763,7 +763,7 @@ def test_lumi_np_scaling():
[[[1.0, 1.0]]],
[[[alpha_lumi, alpha_lumi]]],
]
- assert pdf._modifications(np.array(pars))[1][0].tolist() == [mods]
+ assert pdf.modifications(np.array(pars))[1][0].tolist() == [mods]
assert pytest.approx(pdf.expected_data(pars).tolist()) == [
100 + 20.0 * alpha_lumi,
110.0 * alpha_lumi,
| Promote `model._modifications` to public API
### Summary
[`pyhf.pdf._MainModel._modifications`](https://github.com/scikit-hep/pyhf/blob/be5ae6561d482d18601c9301f28f7b242b5a7a2f/src/pyhf/pdf.py#L529-L543) is useful for model visualization and debugging. See for example https://github.com/scikit-hep/pyhf/pull/1639#discussion_r730752320. It also came up in the discussion https://github.com/scikit-hep/pyhf/discussions/1414. This function is currently not part of the public API. Given its usefulness, I propose changing that. This suggestion also came up in https://github.com/scikit-hep/pyhf/pull/1461#discussion_r633482129.
Two alternatives are possible: using the `_modifications` API even though it signals it is not public (accepting a possible breaking change in the future), or re-implementing it externally.
### Additional Information
See https://github.com/scikit-hep/pyhf/pull/1639#discussion_r730752320 for example usage; this API could presumably also be used for [`pyhf-validation/scripts/validate_systs.py`](https://github.com/pyhf/pyhf-validation/blob/master/scripts/validate_systs.py).
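As a concrete illustration, a minimal sketch of calling the promoted method (placeholder model and parameter values, following the usage in the tests above):
```python
import pyhf

model = pyhf.simplemodels.uncorrelated_background(
    signal=[5.0], bkg=[50.0], bkg_uncertainty=[7.0]
)
pars = pyhf.tensorlib.astensor(model.config.suggested_init())

# deltas are the additive modifications, factors the multiplicative ones
deltas, factors = model.modifications(pars)
```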
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2023-09-20T17:07:00 |
|
scikit-hep/pyhf | 2,357 | scikit-hep__pyhf-2357 | [
"2353"
] | 50936682671edf814a8fe47a3ec661c914aed15a | diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py
--- a/src/pyhf/schema/validator.py
+++ b/src/pyhf/schema/validator.py
@@ -1,4 +1,5 @@
import numbers
+from pathlib import Path
from typing import Mapping, Union
import jsonschema
@@ -70,12 +71,15 @@ def validate(
version = version or variables.SCHEMA_VERSION
- schema = load_schema(f'{version}/{schema_name}')
+ schema = load_schema(str(Path(version).joinpath(schema_name)))
- # note: trailing slash needed for RefResolver to resolve correctly
+ # note: trailing slash needed for RefResolver to resolve correctly and by
+ # design, pathlib strips trailing slashes. See ref below:
+ # * https://bugs.python.org/issue21039
+ # * https://github.com/python/cpython/issues/65238
resolver = jsonschema.RefResolver(
- base_uri=f"file://{variables.schemas}/{version}/",
- referrer=f"{schema_name}",
+ base_uri=f"{Path(variables.schemas).joinpath(version).as_uri()}/",
+ referrer=schema_name,
store=variables.SCHEMA_CACHE,
)
| jsonschema cannot find path specified \\1.0.0\\def.json
### Summary
When I try to install pyhf in a venv and then create a simple model, it gives me an error:
```
<urlopen error [WinError 3] The system cannot find the path specified: '\\1.0.0\\defs.json'>
```
When I check the path to the schema with
```
>>> pyhf.schema.path
WindowsPath('C:/Users/alexh/Downloads/pyhf_test/pyhf_venv/Lib/site-packages/pyhf/schemas')
```
Following this path into the `1.0.0` folder, `defs.json` does exist.
### OS / Environment
```console
# Windows 11
# Python 3.11.6
```
### Steps to Reproduce
```
$ python -m venv venv_pyhf
$ venv_pyhf/Scripts/activate.ps1
$ pip install pyhf
$ python
>>> import pyhf
>>> model = pyhf.simplemodels.uncorrelated_background(signal=[1,2,3], bkg=[4,5,6], bkg_uncertainty=[2,3,4])
_RefResolutionError: <urlopen error [WinError 3] The system cannot find the path specified: '\\1.0.0\\defs.json'>
```
### File Upload (optional)
_No response_
### Expected Results
Expected to get a pyhf.pdf.Model.
### Actual Results
```console
Traceback (most recent call last):
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 1097, in resolve_from_url
document = self.store[url]
~~~~~~~~~~^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\_utils.py", line 20, in __getitem__
return self.store[self.normalize(uri)]
~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^
KeyError: 'file://C:\\Users\\alexh\\Downloads\\pyhf_test\\pyhf_venv\\Lib\\site-packages\\pyhf\\schemas/1.0.0/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 1505, in open_local_file
stats = os.stat(localfile)
^^^^^^^^^^^^^^^^^^
FileNotFoundError: [WinError 3] The system cannot find the path specified: '\\1.0.0\\defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 1100, in resolve_from_url
document = self.resolve_remote(url)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 1204, in resolve_remote
with urlopen(uri) as url:
^^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 216, in urlopen
return opener.open(url, data, timeout)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 519, in open
response = self._open(req, data)
^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 536, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 496, in _call_chain
result = func(*args)
^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 1483, in file_open
return self.open_local_file(req)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.11_3.11.1776.0_x64__qbz5n2kfra8p0\Lib\urllib\request.py", line 1522, in open_local_file
raise URLError(exp)
urllib.error.URLError: <urlopen error [WinError 3] The system cannot find the path specified: '\\1.0.0\\defs.json'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\pyhf\simplemodels.py", line 149, in uncorrelated_background
return Model(spec, batch_size=batch_size, validate=validate)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\pyhf\pdf.py", line 769, in __init__
schema.validate(self.spec, self.schema, version=self.version)
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\pyhf\schema\validator.py", line 93, in validate
return validator.validate(spec)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 434, in validate
for error in self.iter_errors(*args, **kwargs):
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 368, in iter_errors
for error in errors:
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\_keywords.py", line 284, in ref
yield from validator._validate_reference(ref=ref, instance=instance)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 461, in _validate_reference
scope, resolved = resolve(ref)
^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 1086, in resolve
return url, self._remote_cache(url)
^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\alexh\Downloads\pyhf_test\pyhf_venv\Lib\site-packages\jsonschema\validators.py", line 1102, in resolve_from_url
raise exceptions._RefResolutionError(exc)
jsonschema.exceptions._RefResolutionError: <urlopen error [WinError 3] The system cannot find the path specified: '\\1.0.0\\defs.json'>
```
### pyhf Version
```console
pyhf, version 0.7.4
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
| This is going to be interesting to debug as I do not have a Windows machine to test some things. We're relying primarily on `pathlib` for almost everything so it's not abundantly clear to me at first what is going on. Is there a way for you to print out what the following gives you upon import?
```python
>>> pyhf.schema.variables.SCHEMA_CACHE.keys()
dict_keys(['https://scikit-hep.org/pyhf/schemas/1.0.0/defs.json'])
>>> pyhf.schema.variables.schemas
PosixPath('/Users/kratsg/pyhf/src/pyhf/schemas')
```
I'm wondering what happens in the latter. The former is fine, but I'm realizing Windows filepaths are slightly different from web addresses...
Looks similar to me on the first, but the second seems different.
```
(pyhf_venv) PS C:\Users\alexh\Downloads\pyhf_test> python
Python 3.11.6
>>> import pyhf
>>> import pyhf.schema
>>> pyhf.schema.variables.SCHEMA_CACHE.keys()
dict_keys(['https://scikit-hep.org/pyhf/schemas/1.0.0/defs.json'])
>>> pyhf.schema.variables.schemas
WindowsPath('C:/Users/alexh/Downloads/pyhf_test/pyhf_venv/Lib/site-packages/pyhf/schemas')
```
Let me know if there is anything else I can help with testing on Windows :-)
We technically don't support Windows at the moment, but I will admit that I find this an interesting problem. The difference in `pyhf.schema.variables.schemas` seems like it should be benign given that they both are properly pointing to an installation location with a `pyhf/schemas` directory present. I don't have any experience on how `jsonschema` resolves this on Windows, though, so I think we would need to get access to dev machines or Windows VMs to properly debug.
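A small sketch of the difference, with a hypothetical install path (`PureWindowsPath` lets this run on any platform):
```python
from pathlib import PureWindowsPath

schemas = PureWindowsPath(r"C:\Users\alexh\pyhf_venv\Lib\site-packages\pyhf\schemas")
version = "1.0.0"

# Old construction: str() of a Windows path keeps backslashes, so the drive-letter
# portion is parsed as the URL host and only '\1.0.0\defs.json' is left as the
# local path, which is the error seen above.
print(f"file://{schemas}/{version}/")

# Patched construction: a well-formed file:// URI with forward slashes.
print(f"{schemas.joinpath(version).as_uri()}/")
```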
I suspect I've figured out the issue and will submit a PR shortly. | 2023-10-24T21:40:46 |
|
scikit-hep/pyhf | 2,455 | scikit-hep__pyhf-2455 | [
"2454"
] | adddb0797c564a0158a8e2e69a58ee1f98604bf7 | diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -91,7 +91,12 @@ def download(archive_url, output_directory, force=False, compress=False):
with tarfile.open(
mode="r:*", fileobj=BytesIO(response.content)
) as archive:
- archive.extractall(output_directory)
+ # TODO: Simplify after pyhf is Python 3.12+ only
+ # c.f. https://docs.python.org/3.12/library/tarfile.html#extraction-filters
+ if hasattr(tarfile, "data_filter"):
+ archive.extractall(output_directory, filter="data")
+ else:
+ archive.extractall(output_directory)
except tarfile.ReadError:
if not zipfile.is_zipfile(BytesIO(response.content)):
raise exceptions.InvalidArchive(
| In Python 3.12 tarfile raises DeprecationWarning: Python 3.14 will, by default, filter extracted tar archives and reject files or modify their metadata.
While testing Python 3.12 in CI
https://github.com/scikit-hep/pyhf/blob/adddb0797c564a0158a8e2e69a58ee1f98604bf7/tests/test_scripts.py#L591-L604
raised
```pytb
> assert ret.success
E assert False
E + where False = <pytest_console_scripts.RunResult object at 0x29fd18b90>.success
...
DeprecationWarning: Python 3.14 will, by default, filter extracted tar archives and reject files or modify their metadata. Use the filter argument to control this behavior.
```
This should get fixed before Python 3.12 support is added.
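For reference, the version guard used in the patch, restated as a standalone helper (nothing here beyond what the diff above does):
```python
import tarfile


def extract_all(archive: tarfile.TarFile, output_directory: str) -> None:
    # Python 3.12+ exposes extraction filters (PEP 706); the "data" filter
    # rejects unsafe members. Older Pythons do not accept the keyword.
    if hasattr(tarfile, "data_filter"):
        archive.extractall(output_directory, filter="data")
    else:
        archive.extractall(output_directory)
```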
| c.f. https://docs.python.org/3.12/library/tarfile.html#tarfile-extraction-filter and [PEP 706 – Filter for `tarfile.extractall`](https://peps.python.org/pep-0706/). | 2024-03-13T06:15:50 |
|
scikit-hep/pyhf | 2,459 | scikit-hep__pyhf-2459 | [
"2453"
] | 50f107609b2fd0f2f6085ac760708b73e244eb25 | diff --git a/src/pyhf/writexml.py b/src/pyhf/writexml.py
--- a/src/pyhf/writexml.py
+++ b/src/pyhf/writexml.py
@@ -56,7 +56,7 @@ def _export_root_histogram(hist_name, data):
# https://stackoverflow.com/a/4590052
def indent(elem, level=0):
i = "\n" + level * " "
- if elem:
+ if elem is not None:
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
| diff --git a/tests/test_export.py b/tests/test_export.py
--- a/tests/test_export.py
+++ b/tests/test_export.py
@@ -447,7 +447,7 @@ def test_integer_data(datadir, mocker):
mocker.patch("pyhf.writexml._ROOT_DATA_FILE")
channel = pyhf.writexml.build_channel(spec, channel_spec, {})
- assert channel
+ assert channel is not None
@pytest.mark.parametrize(
| In Python 3.12 xml.etree.ElementTree will raise DeprecationWarning: Testing an element's truth value will raise an exception in future versions
While testing Python 3.12 in CI
https://github.com/scikit-hep/pyhf/blob/adddb0797c564a0158a8e2e69a58ee1f98604bf7/tests/test_export.py#L438-L450
raised
```pytb
> assert channel
E DeprecationWarning: Testing an element's truth value will raise an exception in future versions. Use specific 'len(elem)' or 'elem is not None' test instead.
```
This comes from https://github.com/python/cpython/issues/83122 which landed in Python 3.12. This should get fixed before Python 3.12 support is added.
From the Python 3.12 docs: https://docs.python.org/3.12/library/xml.etree.elementtree.html#element-objects
> Caution: Elements with no subelements will test as `False`. Testing the truth value of an Element is deprecated and will raise an exception in Python 3.14. Use specific `len(elem)` or `elem is None` test instead.:
>
>
> ```python
> element = root.find('foo')
>
> if not element: # careful!
> print("element not found, or element has no subelements")
>
> if element is None:
> print("element not found")
> ```
>
> _Changed in version 3.12_: Testing the truth value of an Element emits [DeprecationWarning](https://docs.python.org/3/library/exceptions.html#DeprecationWarning).
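A minimal sketch of the forward-compatible checks the patch switches to (hypothetical element name):
```python
import xml.etree.ElementTree as ET

channel = ET.Element("channel")  # no subelements yet

# `if channel:` is deprecated on Python 3.12 and will raise on 3.14.
if channel is not None:   # the element exists
    print("found element")
if len(channel) == 0:     # the element has no subelements
    print("no subelements")
```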
| 2024-03-20T08:42:22 |
|
scikit-hep/pyhf | 2,513 | scikit-hep__pyhf-2513 | [
"2512"
] | 7d316ef3d48a6ede64c7d8b7d8d238f6283550a2 | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,7 +55,6 @@ def setup(app):
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.intersphinx',
- 'sphinx_rtd_theme',
'sphinxcontrib.bibtex',
'sphinx.ext.napoleon',
'sphinx_click.ext',
@@ -243,13 +242,13 @@ def setup(app):
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'sphinx_rtd_theme'
+html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
-html_theme_options = {}
+html_theme_options = {"header_links_before_dropdown": 6}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
| Move documentation pages to PyData Sphinx Theme
### Summary
Hi all! After talking to @matthewfeickert, I was wondering if you folks are interested in moving to the PyData Sphinx Theme for your documentation pages. I have experimented with it and it would be a very simple PR. The advantage of doing this is that the PyData Sphinx Theme has been making steady usability, design, and accessibility improvements, and it is widely adopted by other projects in the ecosystem.
Let me know what you think.
### Additional Information
Here is how it looks with minimal customization:

### Code of Conduct
- [X] I agree to follow the Code of Conduct
| 2024-06-05T20:03:21 |
||
cltk/cltk | 347 | cltk__cltk-347 | [
"345",
"297"
] | 83513d2118797c08e460f2b17f466eb4cd5d02f6 | diff --git a/cltk/corpus/latin/__init__.py b/cltk/corpus/latin/__init__.py
--- a/cltk/corpus/latin/__init__.py
+++ b/cltk/corpus/latin/__init__.py
@@ -6,17 +6,36 @@
"""
CLTK Latin corpus readers
"""
+
import os.path
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
+from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
+
+from cltk.tokenize.sentence import TokenizeSentence
+from cltk.tokenize.word import WordTokenizer
# Would like to have this search through a CLTK_DATA environment variable
# Better to use something like make_cltk_path in cltk.utils.file_operations?
home = os.path.expanduser('~')
cltk_path = os.path.join(home, 'CLTK_DATA')
+word_tokenizer = WordTokenizer('latin')
+
+if os.path.exists(cltk_path + 'latin/model/latin_models_cltk/tokenizers/sentence'):
+ sent_tokenizer = TokenizeSentence('latin')
+else:
+ punkt_param = PunktParameters()
+ abbreviations = ['c', 'l', 'm', 'p', 'q', 't', 'ti', 'sex', 'a', 'd', 'cn', 'sp', "m'", 'ser', 'ap', 'n', 'v', 'k', 'mam', 'post', 'f', 'oct', 'opet', 'paul', 'pro', 'sert', 'st', 'sta', 'v', 'vol', 'vop']
+ punkt_param.abbrev_types = set(abbreviations)
+ sent_tokenizer = PunktSentenceTokenizer(punkt_param)
+
# Latin Library
try:
- latinlibrary = PlaintextCorpusReader(cltk_path + '/latin/text/latin_text_latin_library', '.*\.txt', encoding='utf-8')
+ latinlibrary = PlaintextCorpusReader(cltk_path + '/latin/text/latin_text_latin_library',
+ '.*\.txt',
+ word_tokenizer=word_tokenizer,
+ sent_tokenizer=sent_tokenizer,
+ encoding='utf-8')
pass
except IOError as e:
print("Corpus not found. Please check that the Latin Library is installed in CLTK_DATA.")
diff --git a/cltk/tokenize/sentence.py b/cltk/tokenize/sentence.py
--- a/cltk/tokenize/sentence.py
+++ b/cltk/tokenize/sentence.py
@@ -88,3 +88,9 @@ def tokenize_sentences(self: object, untokenized_string: str):
for sentence in tokenizer.sentences_from_text(untokenized_string, realign_boundaries=True): # pylint: disable=C0301
tokenized_sentences.append(sentence)
return tokenized_sentences
+
+ def tokenize(self: object, untokenized_string: str):
+ # NLTK's PlaintextCorpusReader needs a function called tokenize
+ # in functions used as a parameter for sentence tokenization.
+ # So this is an alias for tokenize_sentences().
+ return self.tokenize_sentences(untokenized_string)
diff --git a/cltk/tokenize/word.py b/cltk/tokenize/word.py
--- a/cltk/tokenize/word.py
+++ b/cltk/tokenize/word.py
@@ -125,18 +125,21 @@ def replace(m):
for sent in sents:
temp_tokens = word_tokenizer.word_tokenize(sent)
- if temp_tokens[0].endswith('ne'):
- if temp_tokens[0].lower() not in exceptions:
- temp = [temp_tokens[0][:-2], '-ne']
- temp_tokens = temp + temp_tokens[1:]
-
- if temp_tokens[-1].endswith('.'):
- final_word = temp_tokens[-1][:-1]
- del temp_tokens[-1]
- temp_tokens += [final_word, '.']
-
- for token in temp_tokens:
- tokens.append(token)
+ # Need to check that tokens exist before handling them; needed to make stream.readlines work in PlaintextCorpusReader
+
+ if temp_tokens:
+ if temp_tokens[0].endswith('ne'):
+ if temp_tokens[0].lower() not in exceptions:
+ temp = [temp_tokens[0][:-2], '-ne']
+ temp_tokens = temp + temp_tokens[1:]
+
+ if temp_tokens[-1].endswith('.'):
+ final_word = temp_tokens[-1][:-1]
+ del temp_tokens[-1]
+ temp_tokens += [final_word, '.']
+
+ for token in temp_tokens:
+ tokens.append(token)
# Break enclitic handling into own function?
specific_tokens = []
| Latinlibrary corpus word/sentence loading raises error
The NLTK PlaintextCorpusReader methods words() and sents() raise errors when using commands such as:
```
from cltk.corpus.latin import latinlibrary
latinlibrary.sents()
```
- Words: The reader streams the input, sometimes returning empty lists, which cause index out of range errors with the current Latin lemmatizer.
- Sents: Currently relies on an external NLTK corpus; will raise errors if not installed.
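Both points are addressed in the patch by wiring CLTK's own tokenizers into the reader; a minimal sketch of the resulting construction (assumes the Latin Library corpus and the CLTK Latin sentence-tokenizer model are installed under `~/cltk_data`):
```python
import os.path

from nltk.corpus.reader.plaintext import PlaintextCorpusReader
from cltk.tokenize.sentence import TokenizeSentence
from cltk.tokenize.word import WordTokenizer

root = os.path.expanduser('~/cltk_data/latin/text/latin_text_latin_library')

latinlibrary = PlaintextCorpusReader(root, r'.*\.txt',
                                     word_tokenizer=WordTokenizer('latin'),
                                     sent_tokenizer=TokenizeSentence('latin'),
                                     encoding='utf-8')

print(latinlibrary.words()[:10])
print(latinlibrary.sents()[0])
```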
Questions about corpus reader
Following up from PR #296.
I like this; I have a few questions, as I have not used the NLTK's PlaintextCorpusReader.
Using your new feature, here are a few observations and questions.
``` python
In [1]: from cltk.corpus.latin import latinlibrary
In [2]: type(latinlibrary)
Out[2]: nltk.corpus.reader.plaintext.PlaintextCorpusReader
In [3]: dir(latinlibrary)
Out[3]:
['CorpusView',
'__class__',
'__delattr__',
'__dict__',
'__dir__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__gt__',
'__hash__',
'__init__',
'__le__',
'__lt__',
'__module__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__setattr__',
'__sizeof__',
'__str__',
'__subclasshook__',
'__unicode__',
'__weakref__',
'_encoding',
'_fileids',
'_get_root',
'_para_block_reader',
'_read_para_block',
'_read_sent_block',
'_read_word_block',
'_root',
'_sent_tokenizer',
'_tagset',
'_word_tokenizer',
'abspath',
'abspaths',
'citation',
'encoding',
'ensure_loaded',
'fileids',
'license',
'open',
'paras',
'raw',
'readme',
'root',
'sents',
'unicode_repr',
'words']
In [5]: latinlibrary.words()
Out[5]: ['DUODECIM', 'TABULARUM', 'LEGES', 'DUODECIM', ...]
```
How do I view all words in this?
``` python
In [9]: latinlibrary.sents()
Out[9]: ---------------------------------------------------------------------------
LookupError Traceback (most recent call last)
/Users/kyle/cltk/venv/lib/python3.5/site-packages/IPython/core/formatters.py in __call__(self, obj)
697 type_pprinters=self.type_printers,
698 deferred_pprinters=self.deferred_printers)
--> 699 printer.pretty(obj)
700 printer.flush()
701 return stream.getvalue()
…
/Users/kyle/cltk/venv/lib/python3.5/site-packages/nltk/data.py in find(resource_name, paths)
639 sep = '*' * 70
640 resource_not_found = '\n%s\n%s\n%s' % (sep, msg, sep)
--> 641 raise LookupError(resource_not_found)
642
643
LookupError:
**********************************************************************
Resource 'tokenizers/punkt/PY3/english.pickle' not found.
Please use the NLTK Downloader to obtain the resource: >>>
nltk.download()
Searched in:
- '/Users/kyle/nltk_data'
- '/usr/share/nltk_data'
- '/usr/local/share/nltk_data'
- '/usr/lib/nltk_data'
- '/usr/local/lib/nltk_data'
- ''
**********************************************************************
```
What kind of tokenizer is it looking for, sentence or word? I made a sentence tokenizer for Latin (in the models repo), so I assume it's usable here, though I don't know the code.
``` python
In [17]: latinlibrary.raw()
---------------------------------------------------------------------------
UnicodeDecodeError Traceback (most recent call last)
<ipython-input-17-8861dd5120c2> in <module>()
----> 1 latinlibrary.raw()
…
/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/encodings/utf_8.py in decode(input, errors)
14
15 def decode(input, errors='strict'):
---> 16 return codecs.utf_8_decode(input, errors, True)
17
18 class IncrementalEncoder(codecs.IncrementalEncoder):
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 32177: invalid start byte
```
Having full Unicode support is a must for us, obviously.
Finally, could this allow for sub-selection of texts? Or can this just choose the entirety of a corpus?
Thanks for this! I'm not trying to be too critical. Just want to understand the implementation's current limitations.
|
Have a look at this gist for some examples (and output) that I generated this morning. It shows access to words, sentences, fileids, and sub-selection of texts. "raw" and "sents" work on my machine—I'll try to figure out why.
As far as unicode support—do you have the most recent copy of the latin_text_latin_library corpus installed? I had converted all of the txt files to utf-8 and pushed the changes earlier in the month.
I am definitely going to do some more research on using custom sent/word tokenizers with this reader.
And nothing too critical—really want this to work! https://gist.github.com/diyclassics/5f4e7ff7963e255dd44278577ffcbf6e#file-gistfile1-txt
You're right, I don't have the latest version of the corpus. That certainly is why.
I can answer any question about the sentence tokenizer. Seems that we'll need to override where it looks for it, from the nltk_data directory to our own.
Looks like PlaintextCorpusReader can take the following arguments "sent_tokenizer" & "word_tokenizer"—just ran this successfully with the CLTK Latin sentence and word tokenizers. Makes sense for these to be the defaults for importing the latin corpora, no?
Btw—did updating the corpus take care of just the unicode problem or did that also affect the other functions [like .sents()]?
So I updated and that fixed the Unicode error. The sents() issue remains, as I do not have (nor do I want) the NLTK's english tokenizer. Error here: https://gist.github.com/kylepjohnson/6bbfef58f2eec14d222fca9b32f72fe6.
| 2016-08-05T19:57:28 |
|
cltk/cltk | 352 | cltk__cltk-352 | [
"351"
] | 43b085b75ae819951bdfdbc816b1f22f4abf78a8 | diff --git a/cltk/corpus/old_english/__init__.py b/cltk/corpus/old_english/__init__.py
new file mode 100644
| Importing CorpusImporter raises error
If I run: `from cltk.corpus.utils.importer import CorpusImporter`
I get:
```
Corpus not found. Please check that the Latin Library is installed in CLTK_DATA.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.5/site-packages/cltk/corpus/utils/importer.py", line 14, in <module>
from cltk.corpus.old_english.corpora import OLD_ENGLISH_CORPORA
ImportError: No module named 'cltk.corpus.old_english'
```
This appears to be a regression introduced somewhere [between v0.1.39 and v0.1.41](https://github.com/cltk/cltk/compare/v0.1.39...v0.1.41). You can verify by using [`cltk_docker`](https://github.com/cltk/cltk_docker), if you change the `pip install --upgrade cltk` line in the `Dockerfile` to `pip install --upgrade cltk==0.1.39`, `install.py` will succeed, otherwise it will fail at the CorpusImporter import line (also happens if you specify `pip install --upgrade git+git://github.com/cltk/cltk.git` for the latest git revision).
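Once the `old_english` package ships an `__init__.py`, the import chain from the traceback should resolve; a quick smoke test (assuming the `corpora` module is present, as the traceback implies):
```python
# Both of these previously failed with: No module named 'cltk.corpus.old_english'
from cltk.corpus.utils.importer import CorpusImporter
from cltk.corpus.old_english.corpora import OLD_ENGLISH_CORPORA
```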
| 2016-08-12T20:00:41 |
||
cltk/cltk | 371 | cltk__cltk-371 | [
"366",
"366"
] | ed17f360f1fe08004a474a4d51cd58db9eb63791 | diff --git a/cltk/corpus/latin/__init__.py b/cltk/corpus/latin/__init__.py
--- a/cltk/corpus/latin/__init__.py
+++ b/cltk/corpus/latin/__init__.py
@@ -17,7 +17,7 @@
# Would like to have this search through a CLTK_DATA environment variable
# Better to use something like make_cltk_path in cltk.utils.file_operations?
home = os.path.expanduser('~')
-cltk_path = os.path.join(home, 'CLTK_DATA')
+cltk_path = os.path.join(home, 'cltk_data')
word_tokenizer = WordTokenizer('latin')
| Inconsistent case of data directory
[The latin module](https://github.com/cltk/cltk/blob/36b6fe7aa23a8c4f41ef694ddb64b4b02534146c/cltk/corpus/latin/__init__.py#L20) tries to load the latin library from "~/CLTK_DATA", which fails on a case-sensitive filesystem.
All other modules seem to use "~/cltk_data", so that should be standardized.
(It would probably be best if the path was factored out and also configurable, but fixing at least that one inconsistency lets me actually import stuff without errors.)
| Stefan, thanks for this. We can fix this immediately. Would you like to make the PR?
| 2016-08-29T13:39:37 |
|
cltk/cltk | 372 | cltk__cltk-372 | [
"2"
] | 98b29950b4748e056a2ed791f86e4e1fa82be7bb | diff --git a/cltk/tokenize/latin_exceptions.py b/cltk/tokenize/latin_exceptions.py
--- a/cltk/tokenize/latin_exceptions.py
+++ b/cltk/tokenize/latin_exceptions.py
@@ -13,126 +13,40 @@
st_exceptions = []
# quisque
-que_exceptions += ['quisque', 'quidque', 'quicque', 'quodque', 'cuiusque', 'cuique',
- 'quemque', 'quoque', 'quique', 'quaeque', 'quorumque', 'quarumque',
- 'quibusque', 'quosque', 'quasque']
+que_exceptions += ['quisque', 'quidque', 'quicque', 'quodque', 'cuiusque', 'cuique', 'quemque', 'quoque', 'quique', 'quaeque', 'quorumque', 'quarumque', 'quibusque', 'quosque', 'quasque']
# uterque
-que_exceptions += ['uterque', 'utraque', 'utrumque', 'utriusque', 'utrique', 'utrumque',
- 'utramque', 'utroque', 'utraque', 'utrique', 'utraeque', 'utrorumque',
- 'utrarumque', 'utrisque', 'utrosque', 'utrasque']
+que_exceptions += ['uterque', 'utraque', 'utrumque', 'utriusque', 'utrique', 'utrumque', 'utramque', 'utroque', 'utraque', 'utrique', 'utraeque', 'utrorumque', 'utrarumque', 'utrisque', 'utrosque', 'utrasque']
# quiscumque
-que_exceptions += ['quicumque', 'quidcumque', 'quodcumque', 'cuiuscumque', 'cuicumque',
- 'quemcumque', 'quamcumque', 'quocumque', 'quacumque', 'quicumque',
- 'quaecumque', 'quorumcumque', 'quarumcumque', 'quibuscumque',
- 'quoscumque', 'quascumque']
+que_exceptions += ['quicumque', 'quidcumque', 'quodcumque', 'cuiuscumque', 'cuicumque', 'quemcumque', 'quamcumque', 'quocumque', 'quacumque', 'quicumque', 'quaecumque', 'quorumcumque', 'quarumcumque', 'quibuscumque', 'quoscumque', 'quascumque']
# unuscumque
-que_exceptions += ['unusquisque', 'unaquaeque', 'unumquodque', 'unumquidque',
- 'uniuscuiusque', 'unicuique', 'unumquemque', 'unamquamque', 'unoquoque',
- 'unaquaque']
+que_exceptions += ['unusquisque', 'unaquaeque', 'unumquodque', 'unumquidque', 'uniuscuiusque', 'unicuique', 'unumquemque', 'unamquamque', 'unoquoque', 'unaquaque']
# plerusque
-que_exceptions += ['plerusque', 'pleraque', 'plerumque', 'plerique', 'pleraeque',
- 'pleroque', 'pleramque', 'plerorumque', 'plerarumque', 'plerisque',
- 'plerosque', 'plerasque']
+que_exceptions += ['plerusque', 'pleraque', 'plerumque', 'plerique', 'pleraeque', 'pleroque', 'pleramque', 'plerorumque', 'plerarumque', 'plerisque', 'plerosque', 'plerasque']
# misc
-que_exceptions += ['absque', 'abusque', 'adaeque', 'adusque', 'aeque', 'antique', 'atque',
- 'circumundique', 'conseque', 'cumque', 'cunque', 'denique', 'deque',
- 'donique', 'hucusque', 'inique', 'inseque', 'itaque', 'longinque',
- 'namque', 'neque', 'oblique', 'peraeque', 'praecoque', 'propinque',
- 'qualiscumque', 'quandocumque', 'quandoque', 'quantuluscumque',
- 'quantumcumque', 'quantuscumque', 'quinque', 'quocumque',
- 'quomodocumque', 'quomque', 'quotacumque', 'quotcumque',
- 'quotienscumque', 'quotiensque', 'quotusquisque', 'quousque', 'relinque',
- 'simulatque', 'torque', 'ubicumque', 'ubique', 'undecumque', 'undique',
- 'usque', 'usquequaque', 'utcumque', 'utercumque', 'utique', 'utrimque',
- 'utrique', 'utriusque', 'utrobique', 'utrubique']
+que_exceptions += ['absque', 'abusque', 'adaeque', 'adusque', 'aeque', 'antique', 'atque', 'circumundique', 'conseque', 'cumque', 'cunque', 'denique', 'deque', 'donique', 'hucusque', 'inique', 'inseque', 'itaque', 'longinque', 'namque', 'neque', 'oblique', 'peraeque', 'praecoque', 'propinque', 'qualiscumque', 'quandocumque', 'quandoque', 'quantuluscumque', 'quantumcumque', 'quantuscumque', 'quinque', 'quocumque', 'quomodocumque', 'quomque', 'quotacumque', 'quotcumque', 'quotienscumque', 'quotiensque', 'quotusquisque', 'quousque', 'relinque', 'simulatque', 'torque', 'ubicumque', 'ubique', 'undecumque', 'undique', 'usque', 'usquequaque', 'utcumque', 'utercumque', 'utique', 'utrimque', 'utrique', 'utriusque', 'utrobique', 'utrubique']
-ne_exceptions += ['absone', 'acharne', 'acrisione', 'acumine', 'adhucine', 'adsuetudine',
- 'aeetine', 'aeschynomene', 'aesone', 'agamemnone', 'agmine', 'albane',
- 'alcyone', 'almone', 'alsine', 'amasene', 'ambitione', 'amne', 'amoene',
- 'amymone', 'anadyomene', 'andrachne', 'anemone', 'aniene', 'anne',
- 'antigone', 'aparine', 'apolline', 'aquilone', 'arachne', 'arne',
- 'arundine', 'ascanione', 'asiane', 'asine', 'aspargine', 'babylone',
- 'barine', 'bellone', 'belone', 'bene', 'benigne', 'bipenne', 'bizone',
- 'bone', 'bubone', 'bulbine', 'cacumine', 'caligine', 'calymne', 'cane',
- 'carcine', 'cardine', 'carmine', 'catacecaumene', 'catone', 'cerne',
- 'certamine', 'chalbane', 'chamaedaphne', 'chamaemyrsine', 'chaone',
- 'chione', 'christiane', 'clymene', 'cognomine', 'commagene', 'commune',
- 'compone', 'concinne', 'condicione', 'condigne', 'cone', 'confine',
- 'consone', 'corone', 'crastine', 'crepidine', 'crimine', 'crine',
- 'culmine', 'cupidine', 'cyane', 'cydne', 'cyllene', 'cyrene', 'daphne',
- 'depone', 'desine', 'dicione', 'digne', 'dine', 'dione', 'discrimine',
- 'diutine', 'dracone', 'dulcedine', 'elatine', 'elephantine', 'elleborine',
- 'epidamne', 'erigone', 'euadne', 'euphrone', 'euphrosyne', 'examine',
- 'faune', 'femine', 'feminine', 'ferrugine', 'fine', 'flamine', 'flumine',
- 'formidine', 'fragmine', 'fraterne', 'fulmine', 'fune', 'germane',
- 'germine', 'geryone', 'gorgone', 'gramine', 'grandine', 'haecine',
- 'halcyone', 'hammone', 'harundine', 'hedone', 'helene', 'helxine',
- 'hermione', 'heroine', 'hesione', 'hicine', 'hicne', 'hierabotane',
- 'hippocrene', 'hispane', 'hodierne', 'homine', 'hominesne', 'hortamine',
- 'hucine', 'humane', 'hunccine', 'huncine', 'iasione', 'iasone', 'igne',
- 'imagine', 'immane', 'immune', 'impoene', 'impone', 'importune', 'impune',
- 'inane', 'inconcinne', 'indagine', 'indigne', 'inferne', 'inguine',
- 'inhumane', 'inpone', 'inpune', 'insane', 'insigne', 'inurbane', 'ismene',
- 'istucine', 'itone', 'iuuene', 'karthagine', 'labiene', 'lacedaemone',
- 'lanugine', 'latine', 'legione', 'lene', 'lenone', 'libidine', 'limine',
- 'limone', 'lumine', 'magne', 'maligne', 'mane', 'margine', 'marone',
- 'masculine', 'matutine', 'medicamine', 'melpomene', 'memnone', 'mesene',
- 'messene', 'misene', 'mitylene', 'mnemosyne', 'moderamine', 'moene',
- 'mone', 'mortaline', 'mucrone', 'munimine', 'myrmidone', 'mytilene',
- 'necne', 'neptune', 'nequene', 'nerine', 'nocturne', 'nomine', 'nonne',
- 'nullane', 'numine', 'nuncine', 'nyctimene', 'obscene', 'obsidione',
- 'oenone', 'omine', 'omne', 'oppone', 'opportune', 'ordine', 'origine',
- 'orphne', 'oxymyrsine', 'paene', 'pallene', 'pane', 'paraetacene',
- 'patalene', 'pectine', 'pelagine', 'pellene', 'pene', 'perbene',
- 'perbenigne', 'peremne', 'perenne', 'perindigne', 'peropportune',
- 'persephone', 'phryne', 'pirene', 'pitane', 'plane', 'pleione', 'plene',
- 'pone', 'praefiscine', 'prasiane', 'priene', 'priuigne', 'procne',
- 'proditione', 'progne', 'prone', 'propone', 'pulmone', 'pylene', 'pyrene',
- 'pythone', 'ratione', 'regione', 'religione', 'remane', 'retine', 'rhene',
- 'rhododaphne', 'robigine', 'romane', 'roxane', 'rubigine', 'sabine',
- 'sane', 'sanguine', 'saturne', 'seditione', 'segne', 'selene', 'semine',
- 'semiplene', 'sene', 'sepone', 'serene', 'sermone', 'serrane', 'siccine',
- 'sicine', 'sine', 'sithone', 'solane', 'sollemne', 'somne', 'sophene',
- 'sperne', 'spiramine', 'stamine', 'statione', 'stephane', 'sterne',
- 'stramine', 'subpone', 'subtegmine', 'subtemine', 'sulmone', 'superne',
- 'supine', 'suppone', 'susiane', 'syene', 'tantane', 'tantine', 'taprobane',
- 'tegmine', 'telamone', 'temne', 'temone', 'tene', 'testudine', 'theophane',
- 'therone', 'thyone', 'tiberine', 'tibicine', 'tiburne', 'tirone',
- 'tisiphone', 'torone', 'transitione', 'troiane', 'turbine', 'turne',
- 'tyrrhene', 'uane', 'uelamine', 'uertigine', 'uesane', 'uimine', 'uirgine',
- 'umbone', 'unguine', 'uolumine', 'uoragine', 'urbane', 'uulcane', 'zone']
+ne_exceptions += ['absone', 'acharne', 'acrisione', 'acumine', 'adhucine', 'adsuetudine', 'aeetine', 'aeschynomene', 'aesone', 'agamemnone', 'agmine', 'albane', 'alcyone', 'almone', 'alsine', 'amasene', 'ambitione', 'amne', 'amoene', 'amymone', 'anadyomene', 'andrachne', 'anemone', 'aniene', 'anne', 'antigone', 'aparine', 'apolline', 'aquilone', 'arachne', 'arne', 'arundine', 'ascanione', 'asiane', 'asine', 'aspargine', 'babylone', 'barine', 'bellone', 'belone', 'bene', 'benigne', 'bipenne', 'bizone', 'bone', 'bubone', 'bulbine', 'cacumine', 'caligine', 'calymne', 'cane', 'carcine', 'cardine', 'carmine', 'catacecaumene', 'catone', 'cerne', 'certamine', 'chalbane', 'chamaedaphne', 'chamaemyrsine', 'chaone', 'chione', 'christiane', 'clymene', 'cognomine', 'commagene', 'commune', 'compone', 'concinne', 'condicione', 'condigne', 'cone', 'confine', 'consone', 'corone', 'crastine', 'crepidine', 'crimine', 'crine', 'culmine', 'cupidine', 'cyane', 'cydne', 'cyllene', 'cyrene', 'daphne', 'depone', 'desine', 'dicione', 'digne', 'dine', 'dione', 'discrimine', 'diutine', 'dracone', 'dulcedine', 'elatine', 'elephantine', 'elleborine', 'epidamne', 'erigone', 'euadne', 'euphrone', 'euphrosyne', 'examine', 'faune', 'femine', 'feminine', 'ferrugine', 'fine', 'flamine', 'flumine', 'formidine', 'fragmine', 'fraterne', 'fulmine', 'fune', 'germane', 'germine', 'geryone', 'gorgone', 'gramine', 'grandine', 'haecine', 'halcyone', 'hammone', 'harundine', 'hedone', 'helene', 'helxine', 'hermione', 'heroine', 'hesione', 'hicine', 'hicne', 'hierabotane', 'hippocrene', 'hispane', 'hodierne', 'homine', 'hominesne', 'hortamine', 'hucine', 'humane', 'hunccine', 'huncine', 'iasione', 'iasone', 'igne', 'imagine', 'immane', 'immune', 'impoene', 'impone', 'importune', 'impune', 'inane', 'inconcinne', 'indagine', 'indigne', 'inferne', 'inguine', 'inhumane', 'inpone', 'inpune', 'insane', 'insigne', 'inurbane', 'ismene', 'istucine', 'itone', 'iuuene', 'karthagine', 'labiene', 'lacedaemone', 'lanugine', 'latine', 'legione', 'lene', 'lenone', 'libidine', 'limine', 'limone', 'lumine', 'magne', 'maligne', 'mane', 'margine', 'marone', 'masculine', 'matutine', 'medicamine', 'melpomene', 'memnone', 'mesene', 'messene', 'misene', 'mitylene', 'mnemosyne', 'moderamine', 'moene', 'mone', 'mortaline', 'mucrone', 'munimine', 'myrmidone', 'mytilene', 'necne', 'neptune', 'nequene', 'nerine', 'nocturne', 'nomine', 'nonne', 'nullane', 'numine', 'nuncine', 'nyctimene', 'obscene', 'obsidione', 'oenone', 'omine', 'omne', 'oppone', 'opportune', 'ordine', 'origine', 'orphne', 'oxymyrsine', 'paene', 'pallene', 'pane', 'paraetacene', 'patalene', 'pectine', 'pelagine', 'pellene', 'pene', 'perbene', 'perbenigne', 'peremne', 'perenne', 'perindigne', 'peropportune', 'persephone', 'phryne', 'pirene', 'pitane', 'plane', 'pleione', 'plene', 'pone', 'praefiscine', 'prasiane', 'priene', 'priuigne', 'procne', 'proditione', 'progne', 'prone', 'propone', 'pulmone', 'pylene', 'pyrene', 'pythone', 'ratione', 'regione', 'religione', 'remane', 'retine', 'rhene', 'rhododaphne', 'robigine', 'romane', 'roxane', 'rubigine', 'sabine', 'sane', 'sanguine', 'saturne', 'seditione', 'segne', 'selene', 'semine', 'semiplene', 'sene', 'sepone', 'serene', 'sermone', 'serrane', 'siccine', 'sicine', 'sine', 'sithone', 'solane', 'sollemne', 'somne', 'sophene', 'sperne', 'spiramine', 'stamine', 'statione', 'stephane', 'sterne', 'stramine', 'subpone', 'subtegmine', 'subtemine', 'sulmone', 'superne', 'supine', 'suppone', 'susiane', 'syene', 'tantane', 'tantine', 'taprobane', 'tegmine', 
'telamone', 'temne', 'temone', 'tene', 'testudine', 'theophane', 'therone', 'thyone', 'tiberine', 'tibicine', 'tiburne', 'tirone', 'tisiphone', 'torone', 'transitione', 'troiane', 'turbine', 'turne', 'tyrrhene', 'uane', 'uelamine', 'uertigine', 'uesane', 'uimine', 'uirgine', 'umbone', 'unguine', 'uolumine', 'uoragine', 'urbane', 'uulcane', 'zone']
-n_exceptions += ['aenean', 'agmen', 'alioquin', 'an', 'attamen', 'carmen', 'certamen', 'cognomen', 'crimen', 'dein',
- 'discrimen', 'en', 'epitheton', 'exin', 'flumen', 'forsan', 'forsitan', 'fulmen', 'iason', 'in',
- 'limen', 'liquamen', 'lumen', 'nomen', 'non', 'numen', 'omen', 'orion', 'quin', 'semen', 'specimen',
- 'tamen', 'titan']
+n_exceptions += ['aenean', 'agmen', 'alioquin', 'an', 'attamen', 'carmen', 'certamen', 'cognomen', 'crimen', 'dein', 'discrimen', 'en', 'epitheton', 'exin', 'flumen', 'forsan', 'forsitan', 'fulmen', 'iason', 'in', 'limen', 'liquamen', 'lumen', 'nomen', 'non', 'numen', 'omen', 'orion', 'quin', 'semen', 'specimen', 'tamen', 'titan']
+
+# English words; this list added to better handle English header, navigation, etc. in plaintext files of the Latin Library corpus.
+n_exceptions += ['alcuin', 'caen', 'christian', 'chronicon', 'châtillon', 'claudian', 'john', 'justin', 'latin', 'lucan', 'martin', 'novatian', 'quintilian', 'roman', 'tertullian']
-ue_exceptions += ['agaue', 'ambigue', 'assidue', 'aue', 'boue', 'breue', 'calue', 'caue',
- 'ciue', 'congrue', 'contigue', 'continue', 'curue', 'exigue', 'exue',
- 'fatue', 'faue', 'fue', 'furtiue', 'gradiue', 'graue', 'ignaue',
- 'incongrue', 'ingenue', 'innocue', 'ioue', 'lasciue', 'leue', 'moue',
- 'mutue', 'naue', 'neue', 'niue', 'perexigue', 'perspicue', 'pingue',
- 'praecipue', 'praegraue', 'prospicue', 'proterue', 'remoue', 'resolue',
- 'saeue', 'salue', 'siue', 'solue', 'strenue', 'sue', 'summoue',
- 'superflue', 'supplicue', 'tenue', 'uiue', 'ungue', 'uoue']
+ue_exceptions += ['agaue', 'ambigue', 'assidue', 'aue', 'boue', 'breue', 'calue', 'caue', 'ciue', 'congrue', 'contigue', 'continue', 'curue', 'exigue', 'exue', 'fatue', 'faue', 'fue', 'furtiue', 'gradiue', 'graue', 'ignaue', 'incongrue', 'ingenue', 'innocue', 'ioue', 'lasciue', 'leue', 'moue', 'mutue', 'naue', 'neue', 'niue', 'perexigue', 'perspicue', 'pingue', 'praecipue', 'praegraue', 'prospicue', 'proterue', 'remoue', 'resolue', 'saeue', 'salue', 'siue', 'solue', 'strenue', 'sue', 'summoue', 'superflue', 'supplicue', 'tenue', 'uiue', 'ungue', 'uoue']
-ve_exceptions += ['agave', 'ave', 'bove', 'breve', 'calve', 'cave', 'cive', 'curve', 'fave',
- 'furtive', 'gradive', 'grave', 'ignave', 'iove', 'lascive', 'leve', 'move',
- 'nave', 'neve', 'nive', 'praegrave', 'prospicve', 'proterve', 'remove',
- 'resolve', 'saeve', 'salve', 'sive', 'solve', 'summove', 'vive', 'vove']
+ve_exceptions += ['agave', 'ave', 'bove', 'breve', 'calve', 'cave', 'cive', 'curve', 'fave', 'furtive', 'gradive', 'grave', 'ignave', 'iove', 'lascive', 'leve', 'move', 'nave', 'neve', 'nive', 'praegrave', 'prospicve', 'proterve', 'remove', 'resolve', 'saeve', 'salve', 'sive', 'solve', 'summove', 'vive', 'vove']
-st_exceptions += ['abest', 'adest', 'ast', 'deest', 'est', 'inest', 'interest', 'post', 'potest', 'prodest', 'subest',
- 'superest']
+st_exceptions += ['abest', 'adest', 'ast', 'deest', 'est', 'inest', 'interest', 'post', 'potest', 'prodest', 'subest', 'superest']
latin_exceptions = list(set(que_exceptions
- + ne_exceptions
- + n_exceptions
- + ue_exceptions
- + ve_exceptions
- + st_exceptions
- ))
+ + ne_exceptions
+ + n_exceptions
+ + ue_exceptions
+ + ve_exceptions
+ + st_exceptions
+ ))
| Dev
dev branch merge for training set fix
| 2016-08-29T20:52:23 |
||
cltk/cltk | 399 | cltk__cltk-399 | [
"46"
] | 82a2bfd55416756fa1df728aad248a6dcab9b2cd | diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py
--- a/cltk/corpus/latin/corpora.py
+++ b/cltk/corpus/latin/corpora.py
@@ -88,5 +88,9 @@
{'location': 'remote',
'type': 'text',
'name': 'latin_text_corpus_grammaticorum_latinorum',
- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},
+ {'location': 'remote',
+ 'type': 'text',
+ 'name': 'latin_text_poeti_ditalia',
+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}
]
| Compile Poeti d’Italia for CLTK
http://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere
| This site will be challenging to crawl. I don't want to invest the time right now to get this data.
That said, we should keep this site in mind as a good project for a non-programmer to essentially do a copy-paste walk-through of the site, making an index along the way. There's a lot of unusual stuff in here.
I agree: lots of unusual material, and with metrical patterns! My only concern is that I don't see any open-access statement or CC license, only "all rights reserved"; what shall we do? Also, I don't know if the site is malfunctioning, but I could only get an index in PDF both in "deoque" and in "poeti di italia". In the latter, some texts can be seen and eventually copy-pasted as you suggest...
@marpozzi Would you have any interest in writing to the owner of these sites at [email protected]? I think this would be a good first step.
@souravsingh has obtained permission for Poeti d’Italia. Assigning to him.
@souravsingh How much work to finish this issue on the Poeti d’Italia corpus?
The work is being done in the repository here: https://github.com/souravsingh/latin_text_poeti_ditalia
Pinging @souravsingh Is this corpus ready to move over? I took a look again and it looks pretty good
Sorry for being inactive @kylepjohnson. I need to properly format the JSON files before it is pushed to upstream branch. Might take a day or two.
Ok, thank you for the fast response. Get it as good as you can, but this doesn't need to be 100% perfect.
@kylepjohnson I have transferred the repository to cltk latin team.
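Once merged, the corpus should be importable through the usual corpus-importer workflow, roughly along these lines (a sketch; the corpus name is the one registered in the diff above):
```python
from cltk.corpus.utils.importer import CorpusImporter

corpus_importer = CorpusImporter('latin')
corpus_importer.import_corpus('latin_text_poeti_ditalia')  # clones into ~/cltk_data
```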
| 2016-10-08T09:15:45 |
|
cltk/cltk | 400 | cltk__cltk-400 | [
"266"
] | 82a2bfd55416756fa1df728aad248a6dcab9b2cd | diff --git a/cltk/stop/old_english/__init__.py b/cltk/stop/old_english/__init__.py
new file mode 100644
--- /dev/null
+++ b/cltk/stop/old_english/__init__.py
@@ -0,0 +1 @@
+
diff --git a/cltk/stop/old_english/stops.py b/cltk/stop/old_english/stops.py
new file mode 100644
--- /dev/null
+++ b/cltk/stop/old_english/stops.py
@@ -0,0 +1,232 @@
+"""This stopword list is adapted from the
+Introduction to Old English website at ``https://lrc.la.utexas.edu/eieol/engol``.
+"""
+
+__author__ = 'Sourav Singh <[email protected]>'
+__license__ = 'GPL License.'
+
+STOPS_LIST = ['and',
+ 'on',
+ 'þonne',
+ 'wið',
+ 'to',
+ 'þæt',
+ 'þe',
+ 'ne',
+ 'ic',
+ 'me',
+ 'heo',
+ 'him',
+ 'he',
+ 'swa'
+ 'þis'
+ 'mid'
+ 'þu'
+ 'ofer'
+ 'his'
+ 'þriwa'
+ 'seo'
+ 'hit'
+ 'se'
+ 'þas'
+ 'cweð'
+ 'þæs'
+ 'in'
+ 'sy'
+ 'ða'
+ 'ðy'
+ 'ær'
+ 'ðonne'
+ 'næfre'
+ 'þone'
+ 'ge'
+ 'ðas'
+ 'þære'
+ 'þam'
+ 'is'
+ 'of'
+ 'gif'
+ 'þæm'
+ 'nu'
+ 'under'
+ 'wiþ'
+ 'geond'
+ 'æfter'
+ 'ðis'
+ 'do'
+ 'hwæt'
+ 'her'
+ 'þurh'
+ 'þus'
+ 'lytel'
+ 'æt'
+ 'ðin'
+ 'willian'
+ 'cume'
+ 'þeos'
+ 'þara'
+ 'are'
+ 'cuman'
+ 'com'
+ 'ænig'
+ 'þon'
+ 'for'
+ 'us'
+ 'ac'
+ 'bot'
+ 'ende'
+ 'wæs',
+ 'wǣre',
+ 'wes',
+ 'wǣron',
+ 'wǣren',
+ 'wesað',
+ 'ic',
+ 'wit',
+ 'wē',
+ 'mīn',
+ 'uncer',
+ 'ūser',
+ 'ūre',
+ 'mē',
+ 'unc',
+ 'ūs',
+ 'mec',
+ 'uncit',
+ 'ūsic',
+ 'ðū',
+ 'git',
+ 'gē',
+ 'ðīn',
+ 'incer',
+ 'ēower',
+ 'ēowre',
+ 'ðē',
+ 'inc',
+ 'ēow',
+ 'ðec',
+ 'incit',
+ 'ēowic',
+ 'hē',
+ 'hēo',
+ 'hīe',
+ 'hit',
+ 'hyt',
+ 'hī',
+ 'hȳ',
+ 'hire',
+ 'hira',
+ 'heora',
+ 'hiera',
+ 'heom',
+ 'hine',
+ 'nǣr',
+ 'nǣfre',
+ 'nǣnig',
+ 'nolde',
+ 'noldon',
+ 'be',
+ 'beforan',
+ 'betweox',
+ 'for',
+ 'from',
+ 'fram',
+ 'mid',
+ 'tō',
+ 'geond',
+ 'oð',
+ 'þurh',
+ 'ofer',
+ 'under',
+ 'bēo',
+ 'bist',
+ 'biþ',
+ 'bēoþ',
+ 'bēon',
+ 'ēom',
+ 'sīe',
+ 'eart',
+ 'sī',
+ 'is',
+ 'sēo',
+ 'sindon',
+ 'sint',
+ 'nēom',
+ 'neart',
+ 'nis',
+ 'sīo',
+ 'ðæt',
+ 'tæt',
+ 'ðæs',
+ 'ðǣre',
+ 'ðǣm',
+ 'ðām',
+ 'ðone',
+ 'ðā',
+ 'ðȳ',
+ 'ðē',
+ 'ðon',
+ 'ðāra',
+ 'ðǣra',
+ 'ðes',
+ 'ðēos',
+ 'ðisse',
+ 'ðeosse',
+ 'ðises',
+ 'ðisses',
+ 'ðisum',
+ 'ðissum',
+ 'ðisne',
+ 'ðās',
+ 'ðīs',
+ 'ðȳs',
+ 'ðissa',
+ 'ðeossa',
+ 'ðeosum',
+ 'ðeossum',
+ 'twēgen',
+ 'twā',
+ 'tū',
+ 'twēgra',
+ 'twǣm',
+ 'þrīe',
+ 'þrēo',
+ 'þrēora',
+ 'þrīm',
+ 'endlefan',
+ 'twelf',
+ 'twēntig',
+ 'þrēotīene',
+ 'þrītig',
+ 'fēower',
+ 'fēowertīene',
+ 'fēowertig',
+ 'fīf',
+ 'fīftīene',
+ 'fīftig',
+ 'siex',
+ 'siextīene',
+ 'siextig',
+ 'seofon',
+ 'seofontīene',
+ 'seofontig',
+ 'eahta',
+ 'eahtatīene',
+ 'eahtatig',
+ 'nigon',
+ 'nigontīene',
+ 'nigontig',
+ 'tīen',
+ 'hund',
+ 'gā',
+ 'gǣst',
+ 'gǣð',
+ 'gāð',
+ 'gān',
+ 'gānde',
+ 'gangende',
+ 'gegān',
+ 'ēode',
+ 'ēodest',
+ 'ēodon',
+ 'ēoden']
| Make stopwords list for Old English
To generalize, I observe that there are different approaches to making stopword lists, based either on statistics (most common words, variously calculated) or grammar (definite and indefinite articles, pronouns, etc.) (or some combination).
In doing this ticket, I would like you to do a little research on whether there exist any good lists for OE. If there is one, let's just take it. If not, we can do a little more research about what's right.
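For context, the eventual module would be used like the other CLTK stop lists; a minimal sketch with hypothetical tokens:
```python
from cltk.stop.old_english.stops import STOPS_LIST

tokens = ['and', 'ic', 'seah', 'hine', 'on', 'wege']  # hypothetical tokenized clause
content_words = [t for t in tokens if t.lower() not in STOPS_LIST]
```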
| Now that the Old-English corpus is finished, I can start the work on this issue
Ok, go for it. Since I opened this issue, I have decided that I only want grammar-based stopwords. That is, no nouns, adjectives, verbs, adverbs, or participles, but only definite and indefinite articles (determiners), pronouns, particles/interjections, prepositions, and conjunctions.
Two steps: to build this list.
1. Look for someone else's Old English stopword list and copy it. We can discuss its pros and cons.
2. Take a Modern English stopword list and adjust for Old English.
The second step will certainly force you to acquaint yourself with the basics of the languages.
@kylepjohnson I can go with the second step for building the list
Please start with step 1., since you and I are not Old English experts. It's always better to use the work of a specialist.
I am not able to find a good list of stopwords for Old English online. I will ask some scholars on this matter
@kylepjohnson I have done a small bit of analysis to obtain a list of most common words from the corpus (I only took Metrical Charms for my testing purposes) - http://paste.ubuntu.com/23082398/
Further checking needs to be done manually for stopwords. I would like to know your views on the list.
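(For reference, the frequency pass described here can be sketched in a few lines; the file path is hypothetical, and the resulting candidates still need the manual and grammatical vetting discussed below.)
```python
from collections import Counter

with open('metrical_charms.txt', encoding='utf-8') as f:  # hypothetical corpus file
    tokens = f.read().lower().split()

# The most frequent tokens are stopword *candidates* only; function words still
# have to be separated from merely common content words by hand.
for token, count in Counter(tokens).most_common(50):
    print(token, count)
```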
Ping @kylepjohnson
Thanks, Sourav. My dilemma is that I don't know what most of the words on the list mean. Let's forget about the corpus approach and just go by grammar. The following I know should be in our stops list:
- wesan: https://lrc.la.utexas.edu/eieol/engol/10#grammar_1060
- personal pronouns: https://lrc.la.utexas.edu/eieol/engol/20#grammar_1062
- ne and its contractions: https://lrc.la.utexas.edu/eieol/engol/20#grammar_1063
- prepositions: https://lrc.la.utexas.edu/eieol/engol/20#grammar_1064
- the verb beon: https://lrc.la.utexas.edu/eieol/engol/40#grammar_1072
- the verb eom: https://lrc.la.utexas.edu/eieol/engol/40#grammar_1073
- all misc. demonstratives and pronouns: https://lrc.la.utexas.edu/eieol/engol/50#grammar_1077
- numerals: https://lrc.la.utexas.edu/eieol/engol/50#grammar_1078
- the verb gan: https://lrc.la.utexas.edu/eieol/engol/60#grammar_1082
A few important notes:
1. Remember to get all declension forms for pronouns, demonstratives, etc.
2. There are spelling inconsistencies between the eth ð and the thorn þ -- I don't know what to do about these. We'll maybe need a converter like we do with J and V in Latin.
@kylepjohnson I created a small list of probable stop words by manually searching them in a dictionary: http://paste.ubuntu.com/23085680/
This second, dictionary-based approach is better. But I believe we still need the grammar-based approach I just wrote about. Let's perhaps use your latest list to supplement and improve our results.
I too believe that the grammar-based approach is better for stopwords, but I wanted to get started somewhere. So I did a bit of analysis and, after obtaining a list of common words, used a dictionary to check the words in that first list.
Oh yes, sorry for carrying on with that approach. I didn't see the links that you gave until an hour later.
@souravsingh What's happening with Old English stop words?
You have a lot of old issues under your name. I think it's best that you let most of them go.
@kylepjohnson Sorry for being silent on this issue. I will send a PR ASAP.
| 2016-10-08T11:49:30 |
|
cltk/cltk | 418 | cltk__cltk-418 | [
"417"
] | 688e5b74a90b39e4d6fcd5dd859963357704237c | diff --git a/cltk/lemmatize/latin/backoff.py b/cltk/lemmatize/latin/backoff.py
old mode 100644
new mode 100755
--- a/cltk/lemmatize/latin/backoff.py
+++ b/cltk/lemmatize/latin/backoff.py
@@ -214,6 +214,8 @@ def __init__(self, regexps=None, backoff=None):
"""
SequentialBackoffLemmatizer.__init__(self, backoff)
RegexpTagger.__init__(self, regexps, backoff)
+ self._check = re.compile('|'.join('(?:%s)' % r[0] for r in regexps))
+ self._regexs = [(re.compile(regexp), pattern,) for regexp, pattern in regexps]
def choose_lemma(self, tokens, index, history):
"""Use regular expressions for rules-based lemmatizing based on word endings;
@@ -224,11 +226,12 @@ def choose_lemma(self, tokens, index, history):
:param history: List with tokens that have already been lemmatized
:return: Str with concatenated lemma
"""
- for regexp, pattern in self._regexs:
- m = re.match(regexp, tokens[index])
- if m:
- return (m.group(1)) + pattern
-
+ if self._check.match(tokens[index]):
+ for regexp, pattern in self._regexs:
+ m = re.match(regexp, tokens[index])
+ if m:
+ return (m.group(1)) + pattern
+
class PPLemmatizer(RegexpLemmatizer):
"""Customization of the RegexpLemmatizer for Latin. The RegexpLemmatizer is
@@ -245,6 +248,7 @@ def __init__(self, regexps=None, pps=None, backoff=None):
# Note different compile to make use of principal parts dictionary structure; also, note
# that the PP dictionary has been set up so that principal parts match their traditional
# numbering, i.e. present stem is indexed as 1. The 0 index is used for the lemma.
+ self._check = re.compile('|'.join('(?:%s)' % r[0] for r in regexps))
self._regexs = [(re.compile(regexp), num) for regexp, num in
regexps]
self.pps = pps
@@ -259,16 +263,17 @@ def choose_lemma(self, tokens, index, history):
:param index: Int with current token
:param history: List with tokens that have already been lemmatized
:return: Str with index[0] from the dictionary value, see above about '0 index'
- """
- for regexp in self._regexs:
- m = re.match(regexp[0], tokens[index])
- if m:
- root = m.group(1)
- match = [lemma for (lemma, pp) in self.pps.items() if root == pp[regexp[1]]]
- if not match:
- pass
- else:
- return match[0] # Lemma is indexed at zero in PP dictionary
+ """
+ if self._check.match(tokens[index]):
+ for regexp in self._regexs:
+ m = re.match(regexp[0], tokens[index])
+ if m:
+ root = m.group(1)
+ match = [lemma for (lemma, pp) in self.pps.items() if root == pp[regexp[1]]]
+ if not match:
+ pass
+ else:
+ return match[0] # Lemma is indexed at zero in PP dictionary
class RomanNumeralLemmatizer(RegexpLemmatizer):
@@ -276,6 +281,7 @@ class RomanNumeralLemmatizer(RegexpLemmatizer):
def __init__(self, regexps=rn_patterns, backoff=None):
"""RomanNumeralLemmatizer"""
RegexpLemmatizer.__init__(self, regexps, backoff)
+ self._regexs = [(re.compile(regexp), pattern,) for regexp, pattern in regexps]
def choose_lemma(self, tokens, index, history):
"""Test case for customized rules-based improvements to lemmatizer using regex; differs
@@ -542,11 +548,12 @@ def _define_lemmatizer(self):
backoff1 = IdentityLemmatizer()
backoff2 = TrainLemmatizer(model=self.LATIN_OLD_MODEL, backoff=backoff1)
backoff3 = PPLemmatizer(regexps=self.latin_verb_patterns, pps=self.latin_pps, backoff=backoff2)
- backoff4 = UnigramLemmatizer(self.train_sents, backoff=backoff3)
+ backoff4 = UnigramLemmatizer(self.train_sents, backoff=backoff3)
backoff5 = RegexpLemmatizer(self.latin_misc_patterns, backoff=backoff4)
- backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
+ backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
- lemmatizer = backoff7
+ #lemmatizer = backoff7
+ lemmatizer = backoff6
return lemmatizer
def lemmatize(self, tokens):
| Regex lemmatizers in backoff.py run slowly
RegexLemmatizer (and related PPLemmatizer, RomanNumeralLemmatizer, etc.) run slowly because they iterate unnecessarily over both all unlemmatized tokens *and* all regex patterns. Yet the lemmatizer only returns a lemma in a small number of cases. Proposed solution: limit the number of iterations over regex patterns.
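The optimization in the patch above can be shown in isolation: compile all endings into one alternation and use it as a cheap pre-check, so the per-pattern loop runs only for tokens that can possibly match. The toy endings below are placeholders, not the real Latin patterns.

```python
import re

# Toy ending patterns in the same (regex, replacement) shape used in backoff.py.
regexps = [('(.+)abus$', 'a'), ('(.+)ebus$', 'es'), ('(.+)ibus$', 'is')]

# One combined alternation, used as a cheap pre-check per token.
check = re.compile('|'.join('(?:%s)' % r for r, _ in regexps))
compiled = [(re.compile(r), repl) for r, repl in regexps]

def lemmatize_token(token):
    if not check.match(token):       # most tokens bail out here
        return None
    for regex, repl in compiled:     # the full loop runs only for likely matches
        m = regex.match(token)
        if m:
            return m.group(1) + repl

print(lemmatize_token('rebus'))   # res
print(lemmatize_token('arma'))    # None
```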
| 2016-11-09T20:20:59 |
||
cltk/cltk | 443 | cltk__cltk-443 | [
"417"
] | 91fbfbe5acf8265e074f6e5530c7fd615ee20dcd | diff --git a/cltk/lemmatize/latin/backoff.py b/cltk/lemmatize/latin/backoff.py
--- a/cltk/lemmatize/latin/backoff.py
+++ b/cltk/lemmatize/latin/backoff.py
@@ -475,8 +475,9 @@ class BackoffLatinLemmatizer(object):
### For comparison, there is also a TrainLemmatizer that replicates the
### original Latin lemmatizer from cltk.stem
"""
- def __init__(self, train):
+ def __init__(self, train, seed=3):
self.train = train
+ self.seed = seed
rel_path = os.path.join('~/cltk_data/latin/model/latin_models_cltk/lemmata/backoff')
path = os.path.expanduser(rel_path)
@@ -531,8 +532,9 @@ def __init__(self, train):
self.latin_pps = {}
print('The file %s is not available in cltk_data' % file)
- def _randomize_data(train):
+ def _randomize_data(train, seed):
import random
+ random.seed(seed)
random.shuffle(train)
pos_train_sents = train[:4000]
lem_train_sents = [[(item[0], item[1]) for item in sent] for sent in train]
@@ -541,7 +543,7 @@ def _randomize_data(train):
return pos_train_sents, train_sents, test_sents
- self.pos_train_sents, self.train_sents, self.test_sents = _randomize_data(self.train)
+ self.pos_train_sents, self.train_sents, self.test_sents = _randomize_data(self.train, self.seed)
def _define_lemmatizer(self):
backoff0 = None
@@ -551,7 +553,7 @@ def _define_lemmatizer(self):
backoff4 = UnigramLemmatizer(self.train_sents, backoff=backoff3)
backoff5 = RegexpLemmatizer(self.latin_misc_patterns, backoff=backoff4)
backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
- backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
+ #backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
#lemmatizer = backoff7
lemmatizer = backoff6
return lemmatizer
| Regex lemmatizers in backoff.py run slowly
RegexLemmatizer (and related PPLemmatizer, RomanNumeralLemmatizer, etc.) run slowly because they iterate unnecessarily over both all unlemmatized tokens *and* all regex patterns. Yet the lemmatizer only returns a lemma in a small number of cases. Proposed solution: limit the number of iterations over regex patterns.
| 2017-02-06T15:30:09 |
||
cltk/cltk | 533 | cltk__cltk-533 | [
"531"
] | a80704e66fdce60a8fb5a95acfffc882bf3ade6d | diff --git a/cltk/tokenize/sentence.py b/cltk/tokenize/sentence.py
--- a/cltk/tokenize/sentence.py
+++ b/cltk/tokenize/sentence.py
@@ -15,7 +15,7 @@
'internal': (',', '·'),
'file': 'greek.pickle', },
'latin':
- {'external': ('.', '?', ':'),
+ {'external': ('.', '?', '!', ':'),
'internal': (',', ';'),
'file': 'latin.pickle', }}
| diff --git a/cltk/tests/test_tokenize.py b/cltk/tests/test_tokenize.py
--- a/cltk/tests/test_tokenize.py
+++ b/cltk/tests/test_tokenize.py
@@ -38,17 +38,16 @@ def setUp(self):
else:
corpus_importer.import_corpus('latin_models_cltk')
self.assertTrue(file_exists)
-
-
def test_sentence_tokenizer_latin(self):
"""Test tokenizing Latin sentences."""
- sentences = "Itaque cum M. Aurelio et P. Minidio et Cn. Cornelio ad apparationem balistarum et scorpionem reliquorumque tormentorum refectionem fui praesto et cum eis commoda accepi, quae cum primo mihi tribuisiti recognitionem, per sorosis commendationem servasti. Cum ergo eo beneficio essem obligatus, ut ad exitum vitae non haberem inopiae timorem, haec tibi scribere coepi, quod animadverti multa te aedificavisse et nunc aedificare, reliquo quoque tempore et publicorum et privatorum aedificiorum, pro amplitudine rerum gestarum ut posteris memoriae traderentur curam habiturum." # pylint: disable=line-too-long
- good_tokenized_sentences = ['Itaque cum M. Aurelio et P. Minidio et Cn. Cornelio ad apparationem balistarum et scorpionem reliquorumque tormentorum refectionem fui praesto et cum eis commoda accepi, quae cum primo mihi tribuisiti recognitionem, per sorosis commendationem servasti.', 'Cum ergo eo beneficio essem obligatus, ut ad exitum vitae non haberem inopiae timorem, haec tibi scribere coepi, quod animadverti multa te aedificavisse et nunc aedificare, reliquo quoque tempore et publicorum et privatorum aedificiorum, pro amplitudine rerum gestarum ut posteris memoriae traderentur curam habiturum.'] # pylint: disable=line-too-long
+ text = "O di inmortales! ubinam gentium sumus? in qua urbe vivimus? quam rem publicam habemus? Hic, hic sunt in nostro numero, patres conscripti, in hoc orbis terrae sanctissimo gravissimoque consilio, qui de nostro omnium interitu, qui de huius urbis atque adeo de orbis terrarum exitio cogitent! Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero! Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem." # pylint: disable=line-too-long
+ target = ['O di inmortales!', 'ubinam gentium sumus?', 'in qua urbe vivimus?', 'quam rem publicam habemus?', 'Hic, hic sunt in nostro numero, patres conscripti, in hoc orbis terrae sanctissimo gravissimoque consilio, qui de nostro omnium interitu, qui de huius urbis atque adeo de orbis terrarum exitio cogitent!', 'Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero!', 'Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem.'] # pylint: disable=line-too-long
tokenizer = TokenizeSentence('latin')
- tokenized_sentences = tokenizer.tokenize_sentences(sentences)
- self.assertEqual(tokenized_sentences, good_tokenized_sentences)
+ tokenized_sentences = tokenizer.tokenize_sentences(text)
+ self.assertEqual(tokenized_sentences, target)
+
'''
def test_sentence_tokenizer_greek(self):
@@ -61,6 +60,7 @@ def test_sentence_tokenizer_greek(self):
tokenized_sentences = tokenizer.tokenize_sentences(sentences)
self.assertEqual(len(tokenized_sentences), len(good_tokenized_sentences))
'''
+
def test_latin_word_tokenizer(self):
"""Test Latin-specific word tokenizer."""
| External punctuation stopped working on Latin sent tokenizer
I was recently reviewing the tokenizer, and it is not capturing exclamation points. I'll look to see whether NLTK has changed anything.
``` python
In [12]: text = """quam penitus maestas exedit cura medullas! ut tibi tunc toto
...: pectore sollicitae sensibus ereptis mens excidit! at ego certe cognoram
...: a parva virgine magnanimam. Mam. Aemilius ad castra venit."""
In [13]: tokenizer.tokenize_sentences(text)
Out[13]:
['quam penitus maestas exedit cura medullas! ut tibi tunc toto pectore sollicitae sensibus ereptis mens excidit! at ego certe cognoram a parva virgine magnanimam.',
'Mam. Aemilius ad castra venit.']
```
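For comparison, the behaviour expected after the fix (with '!' added to the external punctuation) looks roughly like this sketch, which reuses the sentence from the updated test and assumes the `latin_models_cltk` corpus is installed:

```python
from cltk.tokenize.sentence import TokenizeSentence

tokenizer = TokenizeSentence('latin')
text = "O di inmortales! ubinam gentium sumus? in qua urbe vivimus?"
# With '!' in the external punctuation set, each exclamation now ends a sentence:
# ['O di inmortales!', 'ubinam gentium sumus?', 'in qua urbe vivimus?']
for sentence in tokenizer.tokenize_sentences(text):
    print(sentence)
```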
| 2017-05-26T20:19:08 |
|
cltk/cltk | 536 | cltk__cltk-536 | [
"534"
] | 9f887c1cc36a08383dc7260f49a4220a95bc23ad | diff --git a/cltk/corpus/arabic/alphabet.py b/cltk/corpus/arabic/alphabet.py
--- a/cltk/corpus/arabic/alphabet.py
+++ b/cltk/corpus/arabic/alphabet.py
@@ -1,12 +1,10 @@
-"""Arabic alphabet
-source 1 : pyarabic 'https://github.com/linuxscout/pyarabic'
-source 2 : arabicstemmer 'https://github.com/assem-ch/arabicstemmer/blob/master/algorithm/stemmer.sbl'
+"""
+ Arabic alphabet
+ source 1 : pyarabic 'https://github.com/linuxscout/pyarabic'
+ source 2 : arabicstemmer 'https://github.com/assem-ch/arabicstemmer/blob/master/algorithm/stemmer.sbl'
"""
__author__ = 'Lakhdar Benzahia <lakhdar[.]benzahia[at]gmail[.]com>'
-#Arabic digits from 0 to 9
-DIGITS = ['۰', '۱', '۲', '۳', '٤', '۵', '٦', '۷', '۸', '۹']
-
# Arabic letters
# Hamza letter
@@ -253,9 +251,7 @@
FULL_STOP = u'\u06d4'
BYTE_ORDER_MARK = u'\ufeff'
-# Diacritics
-SHORT_VOWELS = ['َ', 'ِ', 'ُ']
-
+#Diacritics
FATHATAN = u'\u064b'
DAMMATAN = u'\u064c'
KASRATAN = u'\u064d'
@@ -464,11 +460,6 @@
PUNCTUATION_MARKS = [COMMA, SEMICOLON, QUESTION]
-# The Persian Alphabet
-LONG_VOWELS = ['ا', 'و', 'ي']
-CONSONANTS = ['ب', 'ت', 'ث', 'ج', 'ح', 'خ', 'د', 'ذ', 'ر', 'ز', 'س', 'ش', 'ص', 'ض', 'ط', 'ظ', 'ع', 'غ', 'ف', 'ق', 'ک', 'ل', 'م', 'ن', 'ه']
-# Special Characters
-SPECIAL = ['ّ', 'ﻻ']
WESTERN_ARABIC_NUMERALS = ['0','1','2','3','4','5','6','7','8','9']
diff --git a/cltk/corpus/arabic/corpora.py b/cltk/corpus/arabic/corpora.py
old mode 100644
new mode 100755
--- a/cltk/corpus/arabic/corpora.py
+++ b/cltk/corpus/arabic/corpora.py
@@ -1,22 +1,24 @@
"""Arabic corpora available for download."""
+__author__ = 'Lakhdar Benzahia <lakhdar[.]benzahia[at]gmail[.]com>'
+
ARABIC_CORPORA = [
{'name': 'arabic_text_perseus',
- 'markup': 'xml',
- 'origin': 'https://github.com/LBenzahia/arabic_text_perseus',
+ 'markup':'xml',
+ 'origin': 'https://github.com/cltk/arabic_text_perseus',
'location': 'remote',
'type': 'text',
'RomanizationType': 'Buckwalter',
},
{'name': 'quranic-corpus',
- 'markup': 'xml',
- 'origin': 'https://github.com/LBenzahia/quranic-corpus',
+ 'markup':'xml',
+ 'origin': 'https://github.com/cltk/arabic_text_quranic_corpus',
'location': 'remote',
'type': 'text',
'RomanizationType': 'none',
},
{'name': 'quranic-corpus-morphology',
- 'origin': 'https://github.com/LBenzahia/quranic-corpus-morphology',
+ 'origin': 'https://github.com/cltk/arabic_morphology_quranic-corpus',
'location': 'remote',
'type': 'text',
'RomanizationType': 'Buckwalter',
| Move 3 Arabic corpora to CLTK organization
@LBenzahia : I have merged your corpus code for Arabic.
https://github.com/cltk/cltk/blob/master/cltk/corpus/arabic/corpora.py
Please transfer ownership of these three libraries to the `cltk` organization.
Then one of us can update the above `corpora.py`.
Thank you!
| 2017-06-04T23:29:18 |
||
cltk/cltk | 549 | cltk__cltk-549 | [
"435"
] | 5ba8dd7be9bda8127554d09e24f81650b9452c03 | diff --git a/cltk/corpus/greek/alphabet.py b/cltk/corpus/greek/alphabet.py
--- a/cltk/corpus/greek/alphabet.py
+++ b/cltk/corpus/greek/alphabet.py
@@ -7,6 +7,7 @@
'\u0391',# Α Greek Capital Letter Alpha
'\u0395', # Ε Greek Capital Letter Epsilon
'\u0397', # Η Greek Capital Letter Eta
+ '\u0370', # Ͱ Greek Capital Letter Heta
'\u0399', # Ι Greek Capital Letter Iota
'\u039f', # Ο Greek Capital Letter Omicron
'\u03a5', # Υ Greek Capital Letter Upsilon
@@ -160,6 +161,7 @@
'\u03b1', # α Greek Small Letter Alpha
'\u03b5', # ε Greek Small Letter Epsilon
'\u03b7', # η Greek Small Letter Eta
+'\u0371', # ͱ Greek Small Letter Heta
'\u03b9', # ι Greek Small Letter Iota
'\u03bf', # ο Greek Small Letter Omicron
'\u03c5', # υ Greek Small Letter Upsilon
@@ -356,6 +358,9 @@
LOWER_RHO_ROUGH = '\u1fe5' # ῥ Greek Small Letter Rho With Dasia
+UPPER_RHO = '\u03a1' # Ρ Greek Capital Letter Rho
+
+UPPER_RHO_ROUGH = '\u1fec' # Ῥ Greek Capital Letter Rho with Dasia
# Consonants --------------------------
@@ -363,9 +368,13 @@
'\u0392', # Β Greek Capital Letter Beta
'\u0393', # Γ Greek Capital Letter Gamma
'\u0394', # Δ Greek Capital Letter Delta
+'\u03dc', # Ϝ Greek Letter Digamma
+'\u0376', # Ͷ Greek Capital Letter Pamphylian Digamma
'\u0396', # Ζ Greek Capital Letter Zeta
'\u0398', # Θ Greek Capital Letter Theta
'\u039a', # Κ Greek Capital Letter Kappa
+'\u03d8', # Ϙ Greek Letter Archaic Koppa
+'\u03de', # Ϟ Greek Letter Koppa
'\u039b', # Λ Greek Capital Letter Lamda
'\u039c', # Μ Greek Capital Letter Mu
'\u039d', # Ν Greek Capital Letter Nu
@@ -373,6 +382,11 @@
'\u03a0', # Π Greek Capital Letter Pi
'\u03a1', # Ρ Greek Capital Letter Rho
'\u03a3', # Σ Greek Capital Letter Sigma
+'\u03da', # Ϛ Greek Letter Stigma
+'\u03e0', # Ϡ Greek Letter Sampi
+'\u0372', # Ͳ Greek Capital Letter Archaic Sampi
+'\u03f6', # Ϻ Greek Capital Letter San
+'\u03f7', # Ϸ Greek Capital Letter Sho
'\u03a4', # Τ Greek Capital Letter Tau
'\u03a6', # Φ Greek Capital Letter Phi
'\u03a7', # Χ Greek Capital Letter Chi
@@ -383,9 +397,13 @@
'\u03b2', # β Greek Small Letter Beta
'\u03b3', # γ Greek Small Letter Gamma
'\u03b4', # δ Greek Small Letter Delta
+'\u03dd', # ϝ Greek Small Letter Digamma
+'\u0377', # ͷ Greek Small Letter Pamphylian Digamma
'\u03b6', # ζ Greek Small Letter Zeta
'\u03b8', # θ Greek Small Letter Theta
'\u03ba', # κ Greek Small Letter Kappa
+'\u03d9', # ϙ Greek Small Letter Archaic Koppa
+'\u03df', # ϟ Greek Small Letter Koppa
'\u03bb', # λ Greek Small Letter Lamda
'\u03bc', # μ Greek Small Letter Mu
'\u03bd', # ν Greek Small Letter Nu
@@ -393,12 +411,48 @@
'\u03c0', # π Greek Small Letter Pi
'\u03c1', # ρ Greek Small Letter Rho
'\u03c3', # σ Greek Small Letter Sigma
+'\u03c2', # ς Greek Small Letter Final Sigma
+'\u03db', # ϛ Greek Small Letter Stigma
+'\u03e1', # ϡ Greek Small Letter Sampi
+'\u0373', # ͳ Greek Small Letter Archaic Sampi
+'\u03fb', # ϻ Greek Small Letter San
+'\u03f8', # ϸ Greek Small Letter Sho
'\u03c4', # τ Greek Small Letter Tau
-'\u03d5', # ϕ Greek Phi Symbol
+'\u03c6', # φ Greek Small Letter Phi
'\u03c7', # χ Greek Small Letter Chi
'\u03c8' # ψ Greek Small Letter Psi
]
+# Numeral Signs and Accents
+
+NUMERAL_SIGNS = [
+ '\u0374', # ʹ Greek Numeral Sign
+ '\u0375' # ͵ Greek Lower Numeral Sign
+]
+
+ACCENTS = [
+ '\u0376', # ͺ Greek Ypogegrammeni
+ '\u0384', # ΄ Greek Tonos
+ '\u0385', # ΅ Greek Dialytika Tonos
+ '\u0387', # · Greek Ano Teleia
+ '\u1fbd', # ᾽ Greek Koronis
+ '\u1fbe', # ι Greek Prosgegrammeni
+ '\u1fbf', # ᾿ Greek Psili
+ '\u1fc0', # ῀ Greek Perispomeni
+ '\u1fc1', # ῁ Greek Dialytika and Perispomeni
+ '\u1fcd', # ῍ Greek Psili and Varia
+ '\u1fce', # ῎ Greek Psili and Oxia
+ '\u1fcf', # ῏ Greek Psili and Perispomeni
+ '\u1fdd', # ῝ Greek Dasia and Varia
+ '\u1fde', # ῞ Greek Dasia and Oxia
+ '\u1fdf', # ῟ Greek Dasia and Perispomeni
+ '\u1fed', # ῭ Greek Dialytika and Varia
+ '\u1fee', # ΅ Greek Dialytika and Oxia
+ '\u1fef', # ` Greek Varia
+ '\u1ffd', # ´ Greek Oxia
+ '\u1ffe' # ´ Greek Dasia
+]
+
MAP_SUBSCRIPT_NO_SUB = {'Ἄ': 'ᾌΙ',
'ᾀ': 'ἀΙ',
'ᾁ': 'ἁΙ',
@@ -477,3 +531,16 @@ def expand_iota_subscript(input_str, lowercase=False):
if lowercase:
new_str = new_str.lower()
return new_str
+
+def filter_non_greek(input_str):
+ """
+ input: string with mixed characters
+ return: string with ancient greek characters
+
+ It conserves whitespace.
+ """
+ greek_alphabet = LOWER + LOWER_ACUTE + LOWER_BREVE + LOWER_CIRCUMFLEX + LOWER_CONSONANTS + LOWER_DIAERESIS + LOWER_DIAERESIS_ACUTE + LOWER_DIAERESIS_CIRCUMFLEX + LOWER_DIAERESIS_GRAVE + LOWER_GRAVE + LOWER_MACRON + [LOWER_RHO] + LOWER_ROUGH + [LOWER_RHO_ROUGH] + [LOWER_RHO_SMOOTH] + LOWER_ROUGH_ACUTE + LOWER_ROUGH_CIRCUMFLEX + LOWER_ROUGH_GRAVE + LOWER_SMOOTH + LOWER_SMOOTH_ACUTE + LOWER_SMOOTH_CIRCUMFLEX + LOWER_SMOOTH_GRAVE + UPPER + UPPER_ACUTE + UPPER_BREVE + UPPER_CONSONANTS + UPPER_DIAERESIS + UPPER_GRAVE + UPPER_MACRON + [UPPER_RHO] + UPPER_ROUGH + [UPPER_RHO_ROUGH] + UPPER_ROUGH_ACUTE + UPPER_ROUGH_CIRCUMFLEX + UPPER_ROUGH_GRAVE + UPPER_SMOOTH + UPPER_SMOOTH_ACUTE + UPPER_SMOOTH_CIRCUMFLEX + UPPER_SMOOTH_GRAVE + NUMERAL_SIGNS + ACCENTS
+ greek_string = "".join([lem for lem in input_str if lem in greek_alphabet or lem == " "])
+ #
+ return greek_string.strip()
+
| diff --git a/cltk/tests/test_corpus.py b/cltk/tests/test_corpus.py
--- a/cltk/tests/test_corpus.py
+++ b/cltk/tests/test_corpus.py
@@ -1,6 +1,7 @@
"""Test cltk.corpus."""
from cltk.corpus.greek.alphabet import expand_iota_subscript
+from cltk.corpus.greek.alphabet import filter_non_greek
from cltk.corpus.greek.beta_to_unicode import Replacer
from cltk.corpus.greek.tlg.parse_tlg_indices import get_female_authors
from cltk.corpus.greek.tlg.parse_tlg_indices import get_epithet_index
@@ -585,6 +586,16 @@ def test_expand_iota_subscript_lower(self):
expanded = expand_iota_subscript(unexpanded, lowercase=True)
target = 'εἰ δὲ καὶ τῶι ἡγεμόνι πιστεύσομεν ὃν ἂν κῦρος διδῶι'
self.assertEqual(expanded, target)
+ #
+ def test_filter_non_greek(self):
+ """
+ Test filter non greek characters in a mixed string.
+ """
+ test_input_string = "[Ἑκα]τόμανδ[ρος Αἰσχ]ρίωνος ⋮ Ἀρ[ιστείδη..c5..]" # PH247029, line 2
+ comparison_string = "Ἑκατμανδρος Αἰσχρωνος Ἀριστεδη"
+ test_result_string = filter_non_greek(test_input_string)
+ #
+ self.assertEqual(test_result_string, comparison_string)
class TestUnicode(unittest.TestCase):
| Greek Phi Symbol or Greek Phi Letter
I was working with `alphabet.py` and I noticed that for the phi letter we use "Greek Phi Symbol U+03D5, ϕ" instead of "Greek Small Letter Phi U+03C6, φ", which is strange because the other consonants seem to belong to the Small Letter group rather than the Symbol group. Was there a specific reason behind this? They also don't seem to be compatible even in the case of `unicodedata.normalize("NFKD", str)`.
| Thank you, this is a mistake. The person who wrote this did not know Greek well. Would you like to make the PR or should I?
I can make the PR
@kylepjohnson This has been taking some time for the following reason: while I was doing some tests on the character lists with `unicodedata`, I saw that some characters pass as `str` instead of `chr`. The direct result of this was the `TypeError: name() argument 1 must be a unicode character, not str` for the `unicodedata.name()` method. So I decided to make some changes to ensure that every character is a `chr`. [Here](https://gist.github.com/D-K-E/b72f32af09c0ec556b4feb25c485a719) is the work in progress. Any suggestions?
Hi, I'm looking at the gist you sent -- I just see some Unicode points.
The TypeError you get -- is that from `unicodedata.normalize()`? Please send me a code snippet whereby I can cause the error for myself.
[Here](https://gist.github.com/D-K-E/a666dd9ce243ddf2d45abed536123658) is the original list that caused the error. Now that I have reexamined the structure of the list, I see that there is a space character in `UPPER_SMOOTH_CIRCUMFLEX[4]` that should not be there. This is probably why `unicodedata` was giving a TypeError. I could just delete the space and change the phi letter for the sake of not diverging a lot from the created issue. Then I thought it would be more explicit for a future reviewer, and easier to maintain, if I give the Unicode points for each character along with their canonical name and representation. That would prevent future errors caused by characters that look alike but have different code points, such as the Greek Phi Symbol and the Greek Phi Letter.
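A short, self-contained illustration of the two code points in question may help; it also shows why a stray space in a list entry makes `unicodedata.name()` fail (the function accepts only a single character):

```python
import unicodedata

phi_symbol = '\u03d5'  # ϕ GREEK PHI SYMBOL
phi_letter = '\u03c6'  # φ GREEK SMALL LETTER PHI

print(unicodedata.name(phi_symbol))   # GREEK PHI SYMBOL
print(unicodedata.name(phi_letter))   # GREEK SMALL LETTER PHI

# The two code points are distinct, but compatibility normalization
# (NFKD/NFKC) folds the symbol into the small letter.
print(phi_symbol == phi_letter)                                  # False
print(unicodedata.normalize('NFKC', phi_symbol) == phi_letter)   # True

# unicodedata.name() accepts exactly one character; passing a longer string
# (for example a letter followed by a stray space) raises the TypeError
# quoted above.
```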
I have https://github.com/jtauber/greek-utils/blob/master/greekutils/beta2unicode.py if it's of any help (even if just to check things). (There's of course greek-accentuation too which has some character handling stuff)
Closing w/ PR #441 | 2017-07-04T13:50:02 |
cltk/cltk | 575 | cltk__cltk-575 | [
"574"
] | a18aa5bf7a40a8fc2a0d213284d961661a4fc43b | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
name='cltk',
packages=find_packages(),
url='https://github.com/cltk/cltk',
- version='0.1.63',
+ version='0.1.64',
zip_safe=True,
test_suite='cltk.tests.test_cltk',
)
| Double-check code and data for new French PR
This issue is for @nat1881 to follow up on her large PR #571 for Old and Middle French.
Natasha, I would like you to do the following steps, to be certain that the code works as you intended:
* Start a brand new clone of (this) cltk repo.
* Make a new virtual env
* Mk source tarball and install (this should install all dependencies, too): `python setup.py sdist install`
* Temporarily rename your `~/cltk_data` dir (eg, `mv ~/cltk_data ~/cltk_data_backup`)
* Import the french corpora and make sure they appear as they should
* Check in ipython all of your commands that you have added to the docs. Copy-paste these exactly as they are in the docs.
* Follow up on any bugs in your own updated branch ([this is what I recommend for updating your branch](https://github.com/cltk/cltk/wiki/Example-Git-and-Python-workflow))
* Bump the version in `setup.py` and make PR for this
* Then @diyclassics or I will push the code to PyPI
You may be tired of this, but you're getting close! :weary:
cc @mlj
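For the corpus-import step in the checklist above, a quick sanity check might look like the sketch below. It assumes the usual `CorpusImporter` interface used elsewhere in the test suite; the exact French corpus names are read from `list_corpora` rather than assumed.

```python
from cltk.corpus.utils.importer import CorpusImporter

corpus_importer = CorpusImporter('french')
print(corpus_importer.list_corpora)   # confirm the French corpora are registered

for corpus_name in corpus_importer.list_corpora:
    corpus_importer.import_corpus(corpus_name)

# Afterwards, check that ~/cltk_data/french/ contains the expected directories.
```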
| Corpora download in the right place, and ipython tests all work (I've had to add a line to the stop filtering docs so the output prints). Should I add French to the docs index?
> Should I add French to the docs index?
Yes, good idea.
Also, I see that some of the text here is not formatted as you wanted: http://docs.cltk.org/en/latest/french.html#lemmatizer | 2017-09-01T16:19:51 |
|
cltk/cltk | 611 | cltk__cltk-611 | [
"610"
] | 6a744498d849c08d492ac740a4f384d22ceac591 | diff --git a/cltk/stem/latin/syllabifier.py b/cltk/stem/latin/syllabifier.py
--- a/cltk/stem/latin/syllabifier.py
+++ b/cltk/stem/latin/syllabifier.py
@@ -27,6 +27,7 @@
'vowels': [
"a", "e", "i", "o", "u",
"á", "é", "í", "ó", "ú",
+ "ā", "ē", "ī", "ō", "ū",
"æ", "œ",
"ǽ", # no accented œ in unicode?
"y"
| diff --git a/cltk/tests/test_stem.py b/cltk/tests/test_stem.py
--- a/cltk/tests/test_stem.py
+++ b/cltk/tests/test_stem.py
@@ -201,6 +201,19 @@ def test_latin_syllabifier(self):
syllables = syllabifier.syllabify(word)
target = ['si', 'de', 're']
self.assertEqual(syllables, target)
+ # tests for macronized words
+ macronized_word = 'audītū'
+ macronized_syllables = syllabifier.syllabify(macronized_word)
+ macronized_target = ['au', 'dī', 'tū']
+ self.assertEqual(macronized_syllables, macronized_target)
+ macronized_word2 = 'conjiciō'
+ macronized_syllables2 = syllabifier.syllabify(macronized_word2)
+ macronized_target2 = ['con', 'ji', 'ci', 'ō']
+ self.assertEqual(macronized_syllables2, macronized_target2)
+ macronized_word3 = 'ā'
+ macronized_syllables3 = syllabifier.syllabify(macronized_word3)
+ macronized_target3 = ['ā']
+ self.assertEqual(macronized_syllables3, macronized_target3)
def test_syllabify(self):
"""Test Indic Syllabifier method"""
| Latin Syllabifier Fails on Macrons
The Latin syllabifier seems to fail on words with macrons. For example, `audītū` becomes `['aud', 'ītū']` rather than `['au', 'dī', 'tū']`.
It looks like we can fix this by just adding the macronized vowels to the [vowels property](https://github.com/cltk/cltk/blob/master/cltk/stem/latin/syllabifier.py#L27).
@kylepjohnson I can submit a PR for this if it sounds reasonable.
| Definitely reasonable! Pinging @lukehollis who was the module's author. One trap to watch out for is detecting [combining](https://en.wikipedia.org/wiki/Combining_character) or [precomposed](https://en.wikipedia.org/wiki/Precomposed_character) characters.
Here's one fix that we threw in to normalize input: https://github.com/cltk/cltk/blob/master/cltk/corpus/utils/formatter.py#L119
I forget what the options mean (in fact, this really needs a docstring!), but it may help.
Awesome ok! Thanks for the heads up on combined/precomposed characters.
Re: normalize input, yeah I'm not too sure what NFC/NFKC means but I'll look into that.
Sure, thank you!
If you get tangled up in the normalization stuff, just reach out and I can dig up my notes on why we made this as we did. KJ | 2017-12-01T19:27:28 |
cltk/cltk | 629 | cltk__cltk-629 | [
"628"
] | 43396058c512c1732db43494a67795d765cf9335 | diff --git a/cltk/lemmatize/latin/backoff.py b/cltk/lemmatize/latin/backoff.py
--- a/cltk/lemmatize/latin/backoff.py
+++ b/cltk/lemmatize/latin/backoff.py
@@ -518,13 +518,14 @@ def _randomize_data(train, seed):
self.pos_train_sents, self.train_sents, self.test_sents = _randomize_data(self.train, self.seed)
def _define_lemmatizer(self):
+ # Suggested backoff chain--should be tested for optimal order
backoff0 = None
backoff1 = IdentityLemmatizer()
backoff2 = TrainLemmatizer(model=self.LATIN_OLD_MODEL, backoff=backoff1)
backoff3 = PPLemmatizer(regexps=self.latin_verb_patterns, pps=self.latin_pps, backoff=backoff2)
- backoff4 = UnigramLemmatizer(self.train_sents, backoff=backoff3)
- backoff5 = RegexpLemmatizer(self.latin_sub_patterns, backoff=backoff4)
- backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
+ backoff4 = RegexpLemmatizer(self.latin_sub_patterns, backoff=backoff3)
+ backoff5 = UnigramLemmatizer(self.train_sents, backoff=backoff4)
+ backoff6 = TrainLemmatizer(model=self.LATIN_MODEL, backoff=backoff5)
#backoff7 = BigramPOSLemmatizer(self.pos_train_sents, include=['cum'], backoff=backoff6)
#lemmatizer = backoff7
lemmatizer = backoff6
diff --git a/cltk/tokenize/word.py b/cltk/tokenize/word.py
--- a/cltk/tokenize/word.py
+++ b/cltk/tokenize/word.py
@@ -8,6 +8,7 @@
import re
+# Cleanup these imports—most are not used!
from nltk.data import load
from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)
from nltk.tokenize.mwe import MWETokenizer
@@ -41,20 +42,28 @@ def __init__(self, language):
"""Take language as argument to the class. Check availability and
setup class variables."""
self.language = language
- self.available_languages = ['arabic', 'latin', 'french', 'old_norse']
+ self.available_languages = ['arabic',
+ 'french',
+ 'greek',
+ 'latin',
+ 'old_norse']
assert self.language in self.available_languages, \
"Specific tokenizer not available for '{0}'. Only available for: '{1}'.".format(self.language, # pylint: disable=line-too-long
- self.available_languages) # pylint: disable=line-too-long
+ self.available_languages) # pylint: disable=line-too-long
+ # ^^^ Necessary? since we have an 'else' in `tokenize`
+
def tokenize(self, string):
"""Tokenize incoming string."""
-
- if self.language == 'latin':
- tokens = tokenize_latin_words(string)
+
+ if self.language == 'arabic':
+ tokens = tokenize_arabic_words(string)
elif self.language == 'french':
tokens = tokenize_french_words(string)
- elif self.language == 'arabic':
- tokens = tokenize_arabic_words(string)
+ elif self.language == 'greek':
+ tokens = tokenize_greek_words(string)
+ elif self.language == 'latin':
+ tokens = tokenize_latin_words(string)
elif self.language == 'old_norse':
tokens = tokenize_old_norse_words(string)
else:
@@ -101,6 +110,56 @@ def nltk_tokenize_words(string, attached_period=False, language=None):
return new_tokens
+def tokenize_arabic_words(text):
+
+ """
+ Tokenize text into words
+ @param text: the input text.
+ @type text: unicode.
+ @return: list of words.
+ @rtype: list.
+ """
+ specific_tokens = []
+ if not text:
+ return specific_tokens
+ else:
+ specific_tokens = araby.tokenize(text)
+ return specific_tokens
+
+
+def tokenize_french_words(string):
+ assert isinstance(string, str), "Incoming string must be type str."
+
+ # normalize apostrophes
+
+ text = re.sub(r"’", r"'", string)
+
+ # Dealing with punctuation
+ text = re.sub(r"\'", r"' ", text)
+ text = re.sub("(?<=.)(?=[.!?)(\";:,«»\-])", " ", text)
+
+ results = str.split(text)
+ return (results)
+
+
+def tokenize_greek_words(text):
+ """
+ Tokenizer divides the string into a list of substrings. This is a placeholder
+ function that returns the default NLTK word tokenizer until
+ Greek-specific options are added.
+
+ Example:
+ >>> text = 'Θουκυδίδης Ἀθηναῖος ξυνέγραψε τὸν πόλεμον τῶν Πελοποννησίων καὶ Ἀθηναίων,'
+ >>> tokenize_greek_words(text)
+ ['Θουκυδίδης', 'Ἀθηναῖος', 'ξυνέγραψε', 'τὸν', 'πόλεμον', 'τῶν', 'Πελοποννησίων', 'καὶ', 'Ἀθηναίων', ',']
+
+ :param string: This accepts the string value that needs to be tokenized
+ :returns: A list of substrings extracted from the string
+ """
+
+ return nltk_tokenize_words(text) # Simplest implementation to start
+
+
def tokenize_latin_words(string):
"""
Tokenizer divides the string into a list of substrings
@@ -211,38 +270,6 @@ def replace(m):
return specific_tokens
-def tokenize_french_words(string):
- assert isinstance(string, str), "Incoming string must be type str."
-
- # normalize apostrophes
-
- text = re.sub(r"’", r"'", string)
-
- # Dealing with punctuation
- text = re.sub(r"\'", r"' ", text)
- text = re.sub("(?<=.)(?=[.!?)(\";:,«»\-])", " ", text)
-
- results = str.split(text)
- return (results)
-
-
-def tokenize_arabic_words(text):
-
- """
- Tokenize text into words
- @param text: the input text.
- @type text: unicode.
- @return: list of words.
- @rtype: list.
- """
- specific_tokens = []
- if not text:
- return specific_tokens
- else:
- specific_tokens = araby.tokenize(text)
- return specific_tokens
-
-
def tokenize_old_norse_words(text):
"""
| diff --git a/cltk/tests/test_tokenize.py b/cltk/tests/test_tokenize.py
--- a/cltk/tests/test_tokenize.py
+++ b/cltk/tests/test_tokenize.py
@@ -61,6 +61,23 @@ def test_sentence_tokenizer_greek(self):
self.assertEqual(len(tokenized_sentences), len(good_tokenized_sentences))
'''
+
+ def test_greek_word_tokenizer(self):
+ """Test Latin-specific word tokenizer."""
+ word_tokenizer = WordTokenizer('greek')
+
+ # Test sources:
+ # - Thuc. 1.1.1
+
+ test = "Θουκυδίδης Ἀθηναῖος ξυνέγραψε τὸν πόλεμον τῶν Πελοποννησίων καὶ Ἀθηναίων, ὡς ἐπολέμησαν πρὸς ἀλλήλους, ἀρξάμενος εὐθὺς καθισταμένου καὶ ἐλπίσας μέγαν τε ἔσεσθαι καὶ ἀξιολογώτατον τῶν προγεγενημένων, τεκμαιρόμενος ὅτι ἀκμάζοντές τε ᾖσαν ἐς αὐτὸν ἀμφότεροι παρασκευῇ τῇ πάσῃ καὶ τὸ ἄλλο Ἑλληνικὸν ὁρῶν ξυνιστάμενον πρὸς ἑκατέρους, τὸ μὲν εὐθύς, τὸ δὲ καὶ διανοούμενον."
+
+ target = ['Θουκυδίδης', 'Ἀθηναῖος', 'ξυνέγραψε', 'τὸν', 'πόλεμον', 'τῶν', 'Πελοποννησίων', 'καὶ', 'Ἀθηναίων', ',', 'ὡς', 'ἐπολέμησαν', 'πρὸς', 'ἀλλήλους', ',', 'ἀρξάμενος', 'εὐθὺς', 'καθισταμένου', 'καὶ', 'ἐλπίσας', 'μέγαν', 'τε', 'ἔσεσθαι', 'καὶ', 'ἀξιολογώτατον', 'τῶν', 'προγεγενημένων', ',', 'τεκμαιρόμενος', 'ὅτι', 'ἀκμάζοντές', 'τε', 'ᾖσαν', 'ἐς', 'αὐτὸν', 'ἀμφότεροι', 'παρασκευῇ', 'τῇ', 'πάσῃ', 'καὶ', 'τὸ', 'ἄλλο', 'Ἑλληνικὸν', 'ὁρῶν', 'ξυνιστάμενον', 'πρὸς', 'ἑκατέρους', ',', 'τὸ', 'μὲν', 'εὐθύς', ',', 'τὸ', 'δὲ', 'καὶ', 'διανοούμενον', '.']
+
+ result = word_tokenizer.tokenize(test)
+
+ self.assertEqual(result, target)
+
+
def test_latin_word_tokenizer(self):
"""Test Latin-specific word tokenizer."""
word_tokenizer = WordTokenizer('latin')
@@ -213,7 +230,7 @@ def test_old_norse_word_tokenizer(self):
'vilja', 'þeira', '.']
word_tokenizer = WordTokenizer('old_norse')
result = word_tokenizer.tokenize(text)
- print(result)
+ #print(result)
self.assertTrue(result == target)
if __name__ == '__main__':
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
```python
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
```
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
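Once Greek is added to `WordTokenizer` (as in the patch above), the intended usage mirrors the other languages. Note that `tokenize()` expects the text itself, so the file should be read first rather than passing its path; the path below is the one from the report and is assumed to exist:

```python
from cltk.tokenize.word import WordTokenizer

word_tokenizer = WordTokenizer('greek')

# Read the file contents first; tokenize() works on a string of text.
with open('files/plain_text/1-23_1.1-42.txt', encoding='utf-8') as f:
    text = f.read()

for word in word_tokenizer.tokenize(text):
    if word.endswith('εαι'):
        print(word)
```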
| 2018-01-28T03:57:26 |
|
cltk/cltk | 631 | cltk__cltk-631 | [
"630"
] | c1a33e8481aab171e2208d8e8963e2a279c127a6 | diff --git a/cltk/tokenize/word.py b/cltk/tokenize/word.py
--- a/cltk/tokenize/word.py
+++ b/cltk/tokenize/word.py
@@ -1,40 +1,19 @@
-# -*-coding:utf-8-*-
"""Language-specific word tokenizers. Primary purpose is to handle enclitics."""
-import re
+__author__ = ['Patrick J. Burns <[email protected]>',
+ 'Kyle P. Johnson <[email protected]>',
+ 'Natasha Voake <[email protected]>']
+# Author info for Arabic, Old Norse?
-from nltk.tokenize.punkt import PunktLanguageVars
-from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
+__license__ = 'MIT License. See LICENSE.'
import re
-# Cleanup these imports—most are not used!
-from nltk.data import load
-from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)
-from nltk.tokenize.mwe import MWETokenizer
-from nltk.tokenize.punkt import PunktSentenceTokenizer
-from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
- BlanklineTokenizer, WordPunctTokenizer,
- wordpunct_tokenize, regexp_tokenize,
- blankline_tokenize)
-#from nltk.tokenize.repp import ReppTokenizer
-from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
-from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
- line_tokenize)
-from nltk.tokenize.stanford import StanfordTokenizer
-from nltk.tokenize.texttiling import TextTilingTokenizer
-#from nltk.tokenize.toktok import ToktokTokenizer
-from nltk.tokenize.treebank import TreebankWordTokenizer
-from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize
-from nltk.tokenize.stanford_segmenter import StanfordSegmenter
+from nltk.tokenize.punkt import PunktLanguageVars
+from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
import cltk.corpus.arabic.utils.pyarabic.araby as araby
-__author__ = ['Patrick J. Burns <[email protected]>', 'Kyle P. Johnson <[email protected]>',
- 'Natasha Voake <[email protected]>']
-__license__ = 'MIT License. See LICENSE.'
-
-
class WordTokenizer: # pylint: disable=too-few-public-methods
"""Tokenize according to rules specific to a given language."""
| Unused imports in word tokenizer module
The word tokenizer module has a large number of imports from NLTK that are not used anywhere in the module. Removing them 1. cleans up the code, and 2. speeds up testing.
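One quick way to spot candidates for removal (a rough sketch, not how the cleanup was actually done; the path assumes the script is run from the repository root, and a linter would be more thorough):

```python
import ast

with open('cltk/tokenize/word.py', encoding='utf-8') as f:
    tree = ast.parse(f.read())

imported = set()
for node in ast.walk(tree):
    if isinstance(node, ast.Import):
        imported.update(alias.asname or alias.name.split('.')[0] for alias in node.names)
    elif isinstance(node, ast.ImportFrom):
        imported.update(alias.asname or alias.name for alias in node.names)

# Names actually referenced anywhere in the module.
used = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}

print(sorted(imported - used))   # rough list of never-referenced imports
```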
| 2018-01-28T16:34:43 |
||
cltk/cltk | 639 | cltk__cltk-639 | [
"628"
] | ebeaa35c4bf6a2fa5bda645bdf418d356fd1c1e4 | diff --git a/cltk/corpus/kannada/alphabet.py b/cltk/corpus/kannada/alphabet.py
--- a/cltk/corpus/kannada/alphabet.py
+++ b/cltk/corpus/kannada/alphabet.py
@@ -20,14 +20,14 @@
YOGAVAAHAKAS = ['ಅಂ', 'ಅಃ']
-STRUCTUREDCONSONANTS = [
+STRUCTURED_CONSONANTS = [
'ಕ', 'ಖ', 'ಗ', 'ಘ', 'ಙಚ',
'ಚ', 'ಛ', 'ಜ', 'ಝ', 'ಞ',
'ಟ', 'ಠ', 'ಡ', 'ಢ', 'ಣ',
'ತ', 'ಥ', 'ದ', 'ಧ', 'ನ',
'ಪ', 'ಫ', 'ಬ', 'ಭ', 'ಮ']
-UNSTRUCTURESCONSONANTS = [
+UNSTRUCTURED_CONSONANTS = [
'ಯ', 'ರ', 'ಱ', 'ಲ', 'ವ', 'ಶ',
'ಷ', 'ಸ', 'ಹ', 'ಳ', 'ೞ']
@@ -37,7 +37,7 @@
'೦', '೧', '೨', '೩', '೪',
'೫', '೬', '೭', '೮', '೯']
-VOWELSIGNS = [
+VOWEL_SIGNS = [
'', 'ಾ', 'ಿ', 'ೀ', 'ು',
'ೂ', 'ೃ', 'ೆ', 'ೇ', 'ೈ',
'ೊ', 'ೋ', 'ೌ', 'ಂ', 'ಃ']
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
```python
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
```
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
| 2018-01-29T02:36:21 |
||
cltk/cltk | 644 | cltk__cltk-644 | [
"628"
] | 75141da22b761200e91116d96e70f41190240e04 | diff --git a/cltk/stop/classical_hindi/__init__.py b/cltk/stop/classical_hindi/__init__.py
new file mode 100644
diff --git a/cltk/stop/classical_hindi/stops.py b/cltk/stop/classical_hindi/stops.py
new file mode 100644
--- /dev/null
+++ b/cltk/stop/classical_hindi/stops.py
@@ -0,0 +1,108 @@
+""" Classical Hindi Stopwords
+This list is composed from 100 most frequently occuring words in classical_hindi corpus <https://github.com/cltk/hindi_text_ltrc> in CLTK.
+source code : <https://gist.github.com/inishchith/ad4bc0da200110de638f5408c64bb14c>
+"""
+__author__ = 'Nishchith Shetty <inishchith[at]gmail[.]com>'
+
+STOP_LIST = ["हें", # yes
+ "है", # is
+ "हैं", # there
+ "हि", # this
+ "ही", # only
+ "हो", # may
+ "हे", #
+ "से", # from
+ "अत", # so
+ "के", # of
+ "रहे", # are
+ "का", # of
+ "की", # of
+ "कि", # that
+ "तो", # so
+ "ने", # has
+ "एक", # one
+ "नहीं", # no
+ "पे", # on
+ "में", # in
+ "वाले", # ones
+ "सकते", # can
+ "वह", # he
+ "वे" , # they
+ "कई", # many
+ "होती", # was
+ "आप", # you
+ "यह", # this
+ "और", # and
+ "एवं", # and
+ "को", # to
+ "मे", # in
+ "दो", # two
+ "थे", # were
+ "यदि", # if
+ "उनके", # their
+ "थी" , # was
+ "पर", # on
+ "इस", # this
+ "साथ", # with
+ "लिए", # for
+ "जो", # that
+ "होता", # happen
+ "या", # or
+ "लिये", # for
+ "द्वारा", # by
+ "हुई", # was done
+ "जब", # when
+ "होते", # were
+ "व", # and
+ "न", # not
+ "उनकी", # their
+ "आदि", # more
+ "सकता", # can
+ "उनका", # their
+ "इतयादि", # e.t.c
+ "इतना", # this much
+ "जिस", # which
+ "उस", # that
+ "कैसे",# how
+ "हूँ", # am
+ "ना", # no
+ "कहि", # say
+ "सम", # even
+ "र्", # the
+ "कहँ", # where
+ "बस", # enough
+ "अपना", # ours
+ "यही", # this only
+ "कहीं", # somewhere
+ "हाँ", # yes
+ "मैंने", # i
+ "जहँ", # where
+ "सब", # all
+ "यह", # this
+ "था", # was
+ "तुम", # you
+ "ये", # these
+ "जे", # which
+ "भी", # as well / also
+ "हम", # we
+ "अब", # now
+ "ऐसे", # such
+ "वहाँ", # there
+ "क्या", # what
+ "ओर", # and
+ "इसी", # this
+ "सके", # could
+ "कभी", # sometimes
+ "हर", # every
+ "मेरी", # my
+ "कम", # less
+ "सा",
+ "उन्हें", # them
+ "मेरे", # my
+ "उन", # those
+ "कुछ", # some
+ "इन", # these
+ "ऐसा", # like these
+ "जहा", # where
+ "तीन", # three
+]
| diff --git a/cltk/tests/test_corpus.py b/cltk/tests/test_corpus.py
--- a/cltk/tests/test_corpus.py
+++ b/cltk/tests/test_corpus.py
@@ -729,7 +729,7 @@ def test_in_coordinated_range(self):
def test_is_indiclang_char(self):
self.assertTrue(is_indiclang_char('क', 'hi'))
-
+
def test_swadesh_greek(self):
swadesh = Swadesh('gr')
first_word = 'ἐγώ'
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
```python
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
```
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
| 2018-01-31T15:43:07 |
|
cltk/cltk | 661 | cltk__cltk-661 | [
"646"
] | 88b88dfc964e3753f556290cadd10ef37c9390dc | diff --git a/cltk/corpus/swadesh.py b/cltk/corpus/swadesh.py
--- a/cltk/corpus/swadesh.py
+++ b/cltk/corpus/swadesh.py
@@ -7,6 +7,8 @@
swadesh_gr = ['ἐγώ', 'σύ', 'αὐτός, οὗ, ὅς, ὁ, οὗτος', 'ἡμεῖς', 'ὑμεῖς', 'αὐτοί', 'ὅδε', 'ἐκεῖνος', 'ἔνθα, ἐνθάδε, ἐνταῦθα', 'ἐκεῖ', 'τίς', 'τί', 'ποῦ, πόθι', 'πότε, πῆμος', 'πῶς', 'οὐ, μή', 'πᾶς, ἅπᾱς', 'πολύς', 'τις', 'ὀλίγος, βαιός, παῦρος', 'ἄλλος, ἕτερος', 'εἷς', 'δύο', 'τρεῖς', 'τέσσαρες', 'πέντε', 'μέγας', 'μακρός', 'εὐρύς', 'πυκνός', 'βαρύς', 'μῑκρός', 'βραχύς', 'στενός', 'μανός', 'γυνή', 'ἀνήρ', 'ἄνθρωπος', 'τέκνον, παῖς, παιδίον', 'γυνή', 'ἀνήρ', 'μήτηρ', 'πατήρ', 'ζῷον', 'ἰχθύς', 'ὄρνις, πετεινόν', 'κύων', 'φθείρ', 'ὄφις', 'ἑρπετόν, σκώληξ, ἕλμινς', 'δένδρον', 'ὕλη', 'βακτηρία, ῥάβδος', 'καρπός', 'σπέρμα', 'φύλλον', 'ῥίζα', 'φλοιός', 'ἄνθος', 'χλόη', 'δεσμός, σχοινίον', 'δέρμα', 'κρέας', 'αἷμα', 'ὀστοῦν', 'δημός', 'ᾠόν', 'κέρας', 'οὐρά, κέρκος', 'πτερόν', 'θρίξ, κόμη', 'κεφαλή', 'οὖς', 'ὀφθαλμός', 'ῥίς', 'στόμα', 'ὀδούς', 'γλῶσσα', 'ὄνυξ', 'πούς', 'κῶλον, σκέλος', 'γόνυ', 'χείρ', 'πτέρυξ', 'γαστήρ, κοιλία', 'ἔντερα, σπλάγχνα', 'αὐχήν, τράχηλος', 'νῶτον', 'μαστός, στῆθος', 'καρδία', 'ἧπαρ', 'πίνω', 'ἐσθίω, ἔφαγον', 'δάκνω', 'σπάω', 'πτύω', 'ἐμέω', 'φυσάω', 'πνέω', 'γελάω', 'βλέπω, ὁράω, εἶδον', 'ἀκούω, ἀΐω', 'οἶδα, γιγνώσκω', 'νομίζω, δοκέω, νοέω, οἴομαι', 'ὀσφραίνομαι', 'φοβέομαι', 'καθεύδω, εὕδω, εὐνάζομαι, κοιμάομαι, ἰαύω', 'ζάω, βιόω, οἰκέω', 'ἀποθνῄσκω, θνῄσκω, τελευτάω, ὄλομαι', 'ἀποκτείνω, ἔπεφνον', 'μάχομαι', 'θηρεύω, θηράω, ἰχνεύω, κυνηγετέω, κυνηγέω, σεύω', 'τύπτω', 'τέμνω', 'σχίζω', 'κεντέω', 'κνάω', 'ὀρύσσω, σκᾰ́πτω', 'νέω, κολυμβάω', 'πέτομαι', 'περιπατέω, πατέω, στείχω, βαίνω, βαδίζω, πεζεύω, πορεύω', 'ἱκνέομαι, ἵκω, ἔρχομαι, εἶμι', 'κεῖμαι', 'καθίζω', 'ἵστημι', 'τρέπω', 'πίπτω', 'παρέχω, δίδωμι', 'ἔχω', 'πιέζω', 'τρίβω', 'λούω, πλύνω, νίπτω', 'ἀπομάσσω', 'ἕλκω', 'ὠθέω', 'ῥίπτω, βάλλω', 'δέω', 'ῥάπτω', 'ἀριθμέω', 'φημί, λέγω, ἐνέπω', 'ἀείδω', 'παίζω', 'νέω', 'ῥέω', 'πήγνυμαι', 'αὐξάνω', 'ἥλιος', 'σελήνη', 'ἀστήρ', 'ὕδωρ', 'ὑετός, βροχή', 'ποταμός', 'λίμνη', 'θάλασσα, πέλαγος, πόντος', 'ἅλς', 'λίθος', 'ἄμμος', 'κόνις', 'γῆ, χθών', 'νέφος', 'ὀμίχλη', 'οὐρανός', 'ἄνεμος', 'χιών', 'κρύσταλλος', 'καπνός', 'πῦρ', 'τέφρα', 'καίω', 'ὁδός', 'ἄκρα, ὄρος, βουνός', 'ἐρυθρός, πυρρός', 'χλωρός', 'ξανθός', 'λευκός', 'μέλας', 'νύξ', 'ἡμέρα, ἦμαρ', 'ἔτος', 'θερμός', 'ψυχρός', 'μεστός, πλήρης', 'νέος', 'παλαιός', 'ἀγαθός', 'κακός', 'σαπρός', 'θολερός', 'εὐθύς, ὀρθός', 'κυκλοτερής', 'τομός, ὀξύς', 'ἀμβλύς, βαρύς', 'λεῖος', 'ὑγρός', 'ξηρός', 'δίκαιος', 'ἐγγύς', 'μακράν', 'δεξιός', 'ἀριστερός, εὐώνυμος', 'ἐν', 'ἐν', 'μετά, σύν', 'καί, τε', 'εἰ', 'ὅτι', 'ὄνομα']
+swadesh_txb = ['ñäś', 'tuwe', 'su', 'wes', 'yes', 'cey', 'se', 'su, samp', 'tane', 'tane, omp', 'kᵤse', 'kᵤse', 'ente', 'ente', 'mäkte', 'mā', 'poñc', 'māka', 'ṣemi', 'totka', 'allek', 'ṣe', 'wi', 'trey', 'śtwer', 'piś', 'orotstse', 'pärkare', 'aurtstse', '', 'kramartse', 'lykaśke, totka', '', '', '', 'klyiye, śana', 'eṅkwe', 'śaumo', 'śamaśke', 'śana', 'petso', 'mācer', 'pācer', 'luwo', 'laks', 'salamo luwo', 'ku', 'pärśeriñ', 'arṣāklo, auk', 'yel', 'stām', 'wartto, karāś', 'śakātai', 'oko', 'sārm, śäktālye', 'pilta', 'witsako', 'enmetre', 'pyāpyo', 'atiyai', '', 'ewe, yetse', 'misa', 'yasar', 'āy, āsta pl', 'ṣalype', '', 'krorīyai', 'pako', 'paruwa', 'matsi', 'āśce', 'klautso', 'ek', 'meli', 'koyṃ', 'keme', 'kantwo', '', 'paiyye', 'ckāckai', 'keni', 'ṣar', '', 'kātso', 'kātso', 'kor', 'sark', 'päścane', 'arañce', 'wästarye', 'yokäṃ', 'śuwaṃ', '', '', 'pitke', 'aṅkaiṃ', 'pinaṣṣnäṃ', 'anāṣṣäṃ, satāṣṣäṃ', 'ker-', 'lkāṣṣäṃ', 'klyauṣäṃ', 'aiśtär, kärsanaṃ', 'pälskanaṃ', 'warṣṣäṃ', 'prāskaṃ', 'kläntsaṃ', 'śaiṃ', 'sruketär', 'kauṣäṃ', 'witāre', 'śerītsi', 'karnäṣṣäṃ', 'karsnaṃ, latkanaṃ', 'kautanaṃ', 'tsopäṃ', '', 'rapanaṃ', 'nāṣṣäṃ', 'pluṣäṃ', 'yaṃ', 'känmaṣṣäṃ', 'lyaśäṃ', 'ṣamäṃ, āṣṣäṃ', 'kaltär', 'kluttaṅktär, sporttotär', 'kloyotär', 'aiṣṣäṃ', '', 'klupnātär, nuskaṣṣäṃ', 'lyuwetär, kantanatär', 'laikanatär', 'lyyāstär', 'slaṅktär', 'nätkanaṃ', 'karṣṣäṃ, saläṣṣäṃ', 'śanmästär, kärkaṣṣäṃ', '', 'ṣäṃṣtär', 'weṣṣäṃ', 'piyaṃ', 'kāñmäṃ', 'pluṣäṃ', 'reṣṣäṃ', '', 'staukkanatär', 'kauṃ', 'meñe', 'ścirye', 'war', 'swese', 'cake', 'lyam', 'samudtär', 'salyiye', 'kärweñe', 'warañc', 'tweye, taur', 'keṃ', 'tarkär', '', 'iprer', 'yente', 'śiñcatstse', '', '', 'puwar', 'taur, tweye', 'tsakṣtär,pälketär', 'ytārye', 'ṣale', 'ratre', 'motartstse', 'tute', 'ārkwi', 'erkent-', 'yṣiye', 'kauṃ', 'pikul', 'emalle', 'krośce', 'ite', 'ñuwe', 'ktsaitstse', 'kartse', 'yolo, pakwāre', 'āmpau', 'sal, kraketstse', '', '', 'mātre, akwatse', 'mālle', 'ṣmare', 'karītstse', 'asāre', '', 'akartte, ysape, etsuwai', 'lau, lauke', 'saiwai', 'śwālyai', '-ne', '-ne', 'śle', 'ṣp', 'krui, ente', 'kuce, mäkte', 'ñem']
+
swadesh_pt_old = ['eu', 'tu', 'ele', 'nos', 'vos', 'eles', 'esto, aquesto', 'aquelo', 'aqui', 'ali', 'quen', 'que', 'u', 'quando', 'como', 'non', 'todo', 'muito', 'algũus', 'pouco', 'outro', 'un, ũu', 'dous', 'tres', 'quatro', 'cinco', 'grande, gran', 'longo', 'ancho', 'grosso', 'pesado', 'pequeno', 'curto', 'estreito', 'magro', 'moller, dona', 'ome', 'ome, pessõa', 'infante, meninno, creatura', 'moller', 'marido', 'madre, mãi', 'padre, pai', 'besta, bestia, bescha', 'peixe', 'ave', 'can', 'peollo', 'coobra', 'vermen', 'arvor', 'furesta, mata, monte', 'baston, pao', 'fruita, fruito', 'semente', 'folla', 'raiz', 'cortiça', 'fror, flor', 'erva', 'corda', 'pele', 'carne', 'sangui, sangue', 'osso', 'gordura', 'ovo', 'corno', 'rabo', 'pena', 'cabelo', 'cabeça', 'orella', 'ollo', 'nariz', 'boca', 'dente', 'lingua', 'unna, unlla', 'pee, pe', 'perna', 'gẽollo', 'mão', 'aa', 'ventre', 'tripas', 'colo', 'costas', 'peito, sẽo', 'coraçon', 'figado', 'bever', 'comer', 'morder', 'mamar', 'cospir', '', 'soprar', '', 'riir', 'veer', 'ouvir, oir, ascuitar', 'saber', 'pensar', 'cheirar', 'temer', 'dormir', 'viver', 'morrer', 'matar', 'pelejar', 'caçar', 'bater', 'cortar, partir', '', 'acuitelar', 'rascar', 'cavar', 'nadar', 'voar', 'andar', 'vĩir', 'jazer, deitar', 'sentar', 'levantar', '', 'caer', 'dar', 'tẽer', 'apertar', '', 'lavar', 'terger, enxugar', 'puxar', 'empuxar', 'lançar', 'atar', 'coser', 'contar', 'contar, dizer, falar', 'cantar', 'jogar', 'boiar', 'correr', 'gelar, *gear', 'inchar', 'sol', 'lũa', 'estrela', 'agua', 'chuvia', 'rio', 'lago', 'mar', 'sal', 'pedra', 'arẽa', 'poo', 'terra', 'nuve', 'nevoeiro', 'ceo', 'vento', 'neve', 'geo', 'fumo, fumaz', 'fogo', 'cĩisa', 'queimar, arder', 'caminno, via', 'montanna, monte', 'vermello', 'verde', 'amarelo', 'branco', 'negro', 'noite', 'dia', 'ano', 'caente', 'frio', 'chẽo', 'novo', 'vello, antigo', 'bon, bõo', 'mal, mao', 'podre', 'lixoso', 'estreito', 'redondo', 'amoado', 'romo', 'chão', 'mollado', 'seco', 'reito, dereito', 'preto', 'longe', 'dereita', 'sẽestra', 'a', 'en', 'con', 'e', 'se', 'porque', 'nome']
class Swadesh():
@@ -18,6 +20,7 @@ def words(self):
return swadesh_la
elif self.language == 'gr':
return swadesh_gr
+ elif self.language == 'txb':
+ return swadesh_txb
elif self.language == 'pt_old':
return swadesh_pt_old
-
| diff --git a/cltk/tests/test_corpus.py b/cltk/tests/test_corpus.py
--- a/cltk/tests/test_corpus.py
+++ b/cltk/tests/test_corpus.py
@@ -743,6 +743,12 @@ def test_swadesh_latin(self):
match = swadesh.words()[0]
self.assertEqual(first_word, match)
+ def test_swadesh_tocharianB(self):
+ swadesh = Swadesh('txb')
+ first_word = 'ñäś'
+ match = swadesh.words()[0]
+ self.assertEqual(first_word, match)
+
def test_swadesh_old_portuguese(self):
swadesh = Swadesh('pt_old')
| Add Swadesh list for Tocharian B
Data here: https://en.wiktionary.org/wiki/Appendix:Tocharian_B_Swadesh_list
Follow the pattern here: https://github.com/cltk/cltk/blob/4bf42fc9a19cf711f7eb1e908850fb64c65b0582/cltk/corpus/swadesh.py#L6
Call the list `swadesh_txb`.
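Once added, the list is used through the existing `Swadesh` class, as the accompanying test does:

```python
from cltk.corpus.swadesh import Swadesh

swadesh = Swadesh('txb')
words = swadesh.words()
print(words[0])   # 'ñäś'
```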
| Hello,
I would like to take this up. Will send PR soon.
comments:
* Some words have extra text like 'pl' (?) or a POS tag like 'noun, third-person plural preterite'. I removed those. If they are required, I will update the PR.
* For some words, a translation doesn't exist. For those, I kept an empty string, to preserve the order across all Swadesh lists. | 2018-02-04T22:21:03 |
cltk/cltk | 666 | cltk__cltk-666 | [
"654"
] | bc104c45d77a187ed8b58d4fb7d820fada9c8663 | diff --git a/cltk/corpus/swadesh.py b/cltk/corpus/swadesh.py
--- a/cltk/corpus/swadesh.py
+++ b/cltk/corpus/swadesh.py
@@ -7,6 +7,8 @@
swadesh_gr = ['ἐγώ', 'σύ', 'αὐτός, οὗ, ὅς, ὁ, οὗτος', 'ἡμεῖς', 'ὑμεῖς', 'αὐτοί', 'ὅδε', 'ἐκεῖνος', 'ἔνθα, ἐνθάδε, ἐνταῦθα', 'ἐκεῖ', 'τίς', 'τί', 'ποῦ, πόθι', 'πότε, πῆμος', 'πῶς', 'οὐ, μή', 'πᾶς, ἅπᾱς', 'πολύς', 'τις', 'ὀλίγος, βαιός, παῦρος', 'ἄλλος, ἕτερος', 'εἷς', 'δύο', 'τρεῖς', 'τέσσαρες', 'πέντε', 'μέγας', 'μακρός', 'εὐρύς', 'πυκνός', 'βαρύς', 'μῑκρός', 'βραχύς', 'στενός', 'μανός', 'γυνή', 'ἀνήρ', 'ἄνθρωπος', 'τέκνον, παῖς, παιδίον', 'γυνή', 'ἀνήρ', 'μήτηρ', 'πατήρ', 'ζῷον', 'ἰχθύς', 'ὄρνις, πετεινόν', 'κύων', 'φθείρ', 'ὄφις', 'ἑρπετόν, σκώληξ, ἕλμινς', 'δένδρον', 'ὕλη', 'βακτηρία, ῥάβδος', 'καρπός', 'σπέρμα', 'φύλλον', 'ῥίζα', 'φλοιός', 'ἄνθος', 'χλόη', 'δεσμός, σχοινίον', 'δέρμα', 'κρέας', 'αἷμα', 'ὀστοῦν', 'δημός', 'ᾠόν', 'κέρας', 'οὐρά, κέρκος', 'πτερόν', 'θρίξ, κόμη', 'κεφαλή', 'οὖς', 'ὀφθαλμός', 'ῥίς', 'στόμα', 'ὀδούς', 'γλῶσσα', 'ὄνυξ', 'πούς', 'κῶλον, σκέλος', 'γόνυ', 'χείρ', 'πτέρυξ', 'γαστήρ, κοιλία', 'ἔντερα, σπλάγχνα', 'αὐχήν, τράχηλος', 'νῶτον', 'μαστός, στῆθος', 'καρδία', 'ἧπαρ', 'πίνω', 'ἐσθίω, ἔφαγον', 'δάκνω', 'σπάω', 'πτύω', 'ἐμέω', 'φυσάω', 'πνέω', 'γελάω', 'βλέπω, ὁράω, εἶδον', 'ἀκούω, ἀΐω', 'οἶδα, γιγνώσκω', 'νομίζω, δοκέω, νοέω, οἴομαι', 'ὀσφραίνομαι', 'φοβέομαι', 'καθεύδω, εὕδω, εὐνάζομαι, κοιμάομαι, ἰαύω', 'ζάω, βιόω, οἰκέω', 'ἀποθνῄσκω, θνῄσκω, τελευτάω, ὄλομαι', 'ἀποκτείνω, ἔπεφνον', 'μάχομαι', 'θηρεύω, θηράω, ἰχνεύω, κυνηγετέω, κυνηγέω, σεύω', 'τύπτω', 'τέμνω', 'σχίζω', 'κεντέω', 'κνάω', 'ὀρύσσω, σκᾰ́πτω', 'νέω, κολυμβάω', 'πέτομαι', 'περιπατέω, πατέω, στείχω, βαίνω, βαδίζω, πεζεύω, πορεύω', 'ἱκνέομαι, ἵκω, ἔρχομαι, εἶμι', 'κεῖμαι', 'καθίζω', 'ἵστημι', 'τρέπω', 'πίπτω', 'παρέχω, δίδωμι', 'ἔχω', 'πιέζω', 'τρίβω', 'λούω, πλύνω, νίπτω', 'ἀπομάσσω', 'ἕλκω', 'ὠθέω', 'ῥίπτω, βάλλω', 'δέω', 'ῥάπτω', 'ἀριθμέω', 'φημί, λέγω, ἐνέπω', 'ἀείδω', 'παίζω', 'νέω', 'ῥέω', 'πήγνυμαι', 'αὐξάνω', 'ἥλιος', 'σελήνη', 'ἀστήρ', 'ὕδωρ', 'ὑετός, βροχή', 'ποταμός', 'λίμνη', 'θάλασσα, πέλαγος, πόντος', 'ἅλς', 'λίθος', 'ἄμμος', 'κόνις', 'γῆ, χθών', 'νέφος', 'ὀμίχλη', 'οὐρανός', 'ἄνεμος', 'χιών', 'κρύσταλλος', 'καπνός', 'πῦρ', 'τέφρα', 'καίω', 'ὁδός', 'ἄκρα, ὄρος, βουνός', 'ἐρυθρός, πυρρός', 'χλωρός', 'ξανθός', 'λευκός', 'μέλας', 'νύξ', 'ἡμέρα, ἦμαρ', 'ἔτος', 'θερμός', 'ψυχρός', 'μεστός, πλήρης', 'νέος', 'παλαιός', 'ἀγαθός', 'κακός', 'σαπρός', 'θολερός', 'εὐθύς, ὀρθός', 'κυκλοτερής', 'τομός, ὀξύς', 'ἀμβλύς, βαρύς', 'λεῖος', 'ὑγρός', 'ξηρός', 'δίκαιος', 'ἐγγύς', 'μακράν', 'δεξιός', 'ἀριστερός, εὐώνυμος', 'ἐν', 'ἐν', 'μετά, σύν', 'καί, τε', 'εἰ', 'ὅτι', 'ὄνομα']
+swadesh_sa = ['अहम्' , 'त्वम्', 'स', 'वयम्, नस्', 'यूयम्, वस्', 'ते', 'इदम्', 'तत्', 'अत्र', 'तत्र', 'क', 'किम्', 'कुत्र', 'कदा', 'कथम्', 'न', 'सर्व', 'बहु', 'किञ्चिद्', 'अल्प', 'अन्य', 'एक', 'द्वि', 'त्रि', 'चतुर्', 'पञ्चन्', 'महत्', 'दीर्घ', 'उरु', 'घन', 'गुरु', 'अल्प', 'ह्रस्व', 'अंहु', 'तनु', 'स्त्री', 'पुरुष, नर', 'मनुष्य, मानव', 'बाल, शिशु', 'पत्नी, भार्या', 'पति', 'मातृ', 'पितृ', 'पशु', 'मत्स्य', 'वि, पक्षिन्', 'श्वन्', 'यूका', 'सर्प', 'कृमि', 'वृक्ष, तरु', 'वन', 'दण्ड', 'फल', 'बीज', 'पत्त्र', 'मूल', 'त्वच्', 'पुष्प', 'तृण', 'रज्जु', 'चर्मन्, त्वच्', 'मांस', 'रक्त, असृज्', 'अस्थि', 'पीवस्, मेदस्', 'अण्ड', 'शृङ्ग', 'पुच्छ', 'पर्ण', 'केश', 'शिरस्', 'कर्ण', 'अक्षि', 'नासा', 'वक्त्र, मुख', 'दन्त', 'जिह्वा', 'नख', 'पद', 'जङ्घ', 'जानु', 'हस्त, पाणि', 'पक्ष', 'उदर', 'अन्त्र, आन्त्र, गुद', 'गल, ग्रीवा', 'पृष्ठ', 'स्तन', 'हृदय', 'यकृत्', 'पिबति', 'खादति, अत्ति', 'दशति', 'धयति', 'ष्ठीवति', 'वमति', 'वाति', 'अनिति', 'स्मयते, हसति', 'पश्यति, √दृश्', 'शृणोति', 'जानाति', 'मन्यते, चिन्तयति', 'जिघ्रति', 'बिभेति, भयते', 'स्वपिति', 'जीवति', 'म्रियते', 'हन्ति', 'युध्यते', 'वेति', 'हन्ति, ताडयति', 'कृन्तति', 'भिनत्ति', 'विधति', 'लिखति', 'खनति', 'प्लवते', 'पतति', 'एति, गच्छति, चरति', 'आगच्छति', 'शेते', 'सीदति', 'तिष्ठति', 'वर्तते', 'पद्यते', 'ददाति', 'धरति', 'मृद्नाति', 'घर्षति', 'क्षालयति', 'मार्ष्टि', 'कर्षति', 'नुदति', 'क्षिपति', 'बध्नाति, बन्धति', 'सीव्यति', 'गणयति, कलते', 'वक्ति', 'गायति', 'दीव्यति', 'प्लवते', 'सरति, क्षरति', 'शीयते', 'श्वयति', 'सूर्य, रवि, सूर, भास्कर', 'मास, चन्द्रमस्, चन्द्र', 'नक्षत्र, स्तृ, तारा', 'जल, अप्, पानीय, वारि, उदन्, तोज', 'वर्ष', 'नदी', 'सरस्', 'समुद्र', 'लवण', 'अश्मन्', 'पांसु, शिकता', 'रेणु', 'क्षम्, पृथ्वी', 'नभस्, मेघ', 'मिह्', 'आकाश', 'वायु, वात', 'हिम, तुषार, तुहिन', 'हिम', 'धूम', 'अग्नि', 'आस', 'दहति', 'पथ, अध्वन्, मार्ग', 'गिरि, पर्वत', 'रक्त, रोहित', 'हरित्, हरित, पालाश, पलाश', 'पीत, पीतल', 'श्वेत', 'कृष्ण', 'रात्रि, नक्ति, क्षप्, रजनी', 'दिन, अहर्, दिवस', 'वर्ष, संवत्सर', 'तप्त', 'शीत', 'पूर्ण', 'नव, नूतन', 'जीर्ण, वृद्ध, पुरातन', 'वसु, भद्र', 'पाप, दुष्ट', 'पूति', 'मलिन, समल', 'ऋजु, साधु', 'वृत्त, वर्तुल', 'तीक्ष्ण', 'कुण्ठ', 'श्लक्ष्ण, स्निग्ध', 'आर्द्र, क्लिन्न', 'शुष्क', 'शुद्ध, सत्य', 'नेद, प्रति', 'दूर', 'दक्षिण', 'सव्य', 'काश्यां', 'अंतरे, मध्ये', 'सह', 'च', 'यदि', 'हि', 'नामन्']
+
swadesh_txb = ['ñäś', 'tuwe', 'su', 'wes', 'yes', 'cey', 'se', 'su, samp', 'tane', 'tane, omp', 'kᵤse', 'kᵤse', 'ente', 'ente', 'mäkte', 'mā', 'poñc', 'māka', 'ṣemi', 'totka', 'allek', 'ṣe', 'wi', 'trey', 'śtwer', 'piś', 'orotstse', 'pärkare', 'aurtstse', '', 'kramartse', 'lykaśke, totka', '', '', '', 'klyiye, śana', 'eṅkwe', 'śaumo', 'śamaśke', 'śana', 'petso', 'mācer', 'pācer', 'luwo', 'laks', 'salamo luwo', 'ku', 'pärśeriñ', 'arṣāklo, auk', 'yel', 'stām', 'wartto, karāś', 'śakātai', 'oko', 'sārm, śäktālye', 'pilta', 'witsako', 'enmetre', 'pyāpyo', 'atiyai', '', 'ewe, yetse', 'misa', 'yasar', 'āy, āsta pl', 'ṣalype', '', 'krorīyai', 'pako', 'paruwa', 'matsi', 'āśce', 'klautso', 'ek', 'meli', 'koyṃ', 'keme', 'kantwo', '', 'paiyye', 'ckāckai', 'keni', 'ṣar', '', 'kātso', 'kātso', 'kor', 'sark', 'päścane', 'arañce', 'wästarye', 'yokäṃ', 'śuwaṃ', '', '', 'pitke', 'aṅkaiṃ', 'pinaṣṣnäṃ', 'anāṣṣäṃ, satāṣṣäṃ', 'ker-', 'lkāṣṣäṃ', 'klyauṣäṃ', 'aiśtär, kärsanaṃ', 'pälskanaṃ', 'warṣṣäṃ', 'prāskaṃ', 'kläntsaṃ', 'śaiṃ', 'sruketär', 'kauṣäṃ', 'witāre', 'śerītsi', 'karnäṣṣäṃ', 'karsnaṃ, latkanaṃ', 'kautanaṃ', 'tsopäṃ', '', 'rapanaṃ', 'nāṣṣäṃ', 'pluṣäṃ', 'yaṃ', 'känmaṣṣäṃ', 'lyaśäṃ', 'ṣamäṃ, āṣṣäṃ', 'kaltär', 'kluttaṅktär, sporttotär', 'kloyotär', 'aiṣṣäṃ', '', 'klupnātär, nuskaṣṣäṃ', 'lyuwetär, kantanatär', 'laikanatär', 'lyyāstär', 'slaṅktär', 'nätkanaṃ', 'karṣṣäṃ, saläṣṣäṃ', 'śanmästär, kärkaṣṣäṃ', '', 'ṣäṃṣtär', 'weṣṣäṃ', 'piyaṃ', 'kāñmäṃ', 'pluṣäṃ', 'reṣṣäṃ', '', 'staukkanatär', 'kauṃ', 'meñe', 'ścirye', 'war', 'swese', 'cake', 'lyam', 'samudtär', 'salyiye', 'kärweñe', 'warañc', 'tweye, taur', 'keṃ', 'tarkär', '', 'iprer', 'yente', 'śiñcatstse', '', '', 'puwar', 'taur, tweye', 'tsakṣtär,pälketär', 'ytārye', 'ṣale', 'ratre', 'motartstse', 'tute', 'ārkwi', 'erkent-', 'yṣiye', 'kauṃ', 'pikul', 'emalle', 'krośce', 'ite', 'ñuwe', 'ktsaitstse', 'kartse', 'yolo, pakwāre', 'āmpau', 'sal, kraketstse', '', '', 'mātre, akwatse', 'mālle', 'ṣmare', 'karītstse', 'asāre', '', 'akartte, ysape, etsuwai', 'lau, lauke', 'saiwai', 'śwālyai', '-ne', '-ne', 'śle', 'ṣp', 'krui, ente', 'kuce, mäkte', 'ñem']
swadesh_pt_old = ['eu', 'tu', 'ele', 'nos', 'vos', 'eles', 'esto, aquesto', 'aquelo', 'aqui', 'ali', 'quen', 'que', 'u', 'quando', 'como', 'non', 'todo', 'muito', 'algũus', 'pouco', 'outro', 'un, ũu', 'dous', 'tres', 'quatro', 'cinco', 'grande, gran', 'longo', 'ancho', 'grosso', 'pesado', 'pequeno', 'curto', 'estreito', 'magro', 'moller, dona', 'ome', 'ome, pessõa', 'infante, meninno, creatura', 'moller', 'marido', 'madre, mãi', 'padre, pai', 'besta, bestia, bescha', 'peixe', 'ave', 'can', 'peollo', 'coobra', 'vermen', 'arvor', 'furesta, mata, monte', 'baston, pao', 'fruita, fruito', 'semente', 'folla', 'raiz', 'cortiça', 'fror, flor', 'erva', 'corda', 'pele', 'carne', 'sangui, sangue', 'osso', 'gordura', 'ovo', 'corno', 'rabo', 'pena', 'cabelo', 'cabeça', 'orella', 'ollo', 'nariz', 'boca', 'dente', 'lingua', 'unna, unlla', 'pee, pe', 'perna', 'gẽollo', 'mão', 'aa', 'ventre', 'tripas', 'colo', 'costas', 'peito, sẽo', 'coraçon', 'figado', 'bever', 'comer', 'morder', 'mamar', 'cospir', '', 'soprar', '', 'riir', 'veer', 'ouvir, oir, ascuitar', 'saber', 'pensar', 'cheirar', 'temer', 'dormir', 'viver', 'morrer', 'matar', 'pelejar', 'caçar', 'bater', 'cortar, partir', '', 'acuitelar', 'rascar', 'cavar', 'nadar', 'voar', 'andar', 'vĩir', 'jazer, deitar', 'sentar', 'levantar', '', 'caer', 'dar', 'tẽer', 'apertar', '', 'lavar', 'terger, enxugar', 'puxar', 'empuxar', 'lançar', 'atar', 'coser', 'contar', 'contar, dizer, falar', 'cantar', 'jogar', 'boiar', 'correr', 'gelar, *gear', 'inchar', 'sol', 'lũa', 'estrela', 'agua', 'chuvia', 'rio', 'lago', 'mar', 'sal', 'pedra', 'arẽa', 'poo', 'terra', 'nuve', 'nevoeiro', 'ceo', 'vento', 'neve', 'geo', 'fumo, fumaz', 'fogo', 'cĩisa', 'queimar, arder', 'caminno, via', 'montanna, monte', 'vermello', 'verde', 'amarelo', 'branco', 'negro', 'noite', 'dia', 'ano', 'caente', 'frio', 'chẽo', 'novo', 'vello, antigo', 'bon, bõo', 'mal, mao', 'podre', 'lixoso', 'estreito', 'redondo', 'amoado', 'romo', 'chão', 'mollado', 'seco', 'reito, dereito', 'preto', 'longe', 'dereita', 'sẽestra', 'a', 'en', 'con', 'e', 'se', 'porque', 'nome']
@@ -25,6 +27,8 @@ def words(self):
return swadesh_la
elif self.language == 'gr':
return swadesh_gr
+ elif self.language == 'sa':
+ return swadesh_sa
elif self.language == 'txb':
return swadesh_txb
elif self.language == 'pt_old':
| diff --git a/cltk/tests/test_corpus.py b/cltk/tests/test_corpus.py
--- a/cltk/tests/test_corpus.py
+++ b/cltk/tests/test_corpus.py
@@ -756,5 +756,11 @@ def test_swadesh_old_portuguese(self):
match = swadesh.words()[0]
self.assertEqual(first_word, match)
+ def test_swadesh_sanskrit(self):
+ swadesh = Swadesh('sa')
+ first_word = 'अहम्'
+ match = swadesh.words()[0]
+ self.assertEqual(first_word, match)
+
if __name__ == '__main__':
unittest.main()
| Add Swadesh list for Sanskrit
Data here: https://en.wiktionary.org/wiki/Appendix:Sanskrit_Swadesh_list
Please be sure to cite the author of this in a `# comment`
Follow the pattern here: https://github.com/cltk/cltk/blob/4bf42fc9a19cf711f7eb1e908850fb64c65b0582/cltk/corpus/swadesh.py#L6
Call the list `swadesh_sa`.
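For orientation, a minimal sketch of the requested pattern — a module-level list plus a language branch in `Swadesh.words()` — as it is realized in the patch above. The three-item list is only an illustrative truncation, and the class skeleton mirrors the existing `cltk/corpus/swadesh.py`, not a new API:

```python
# Illustrative truncation only; the full ~200-item list (with its Wiktionary
# attribution, per the request above) belongs in cltk/corpus/swadesh.py.
swadesh_sa = ['अहम्', 'त्वम्', 'स']  # 1 "I", 2 "thou", 3 "he"

class Swadesh:
    """Mirrors the dispatch pattern of the existing cltk.corpus.swadesh.Swadesh."""
    def __init__(self, language):
        self.language = language

    def words(self):
        if self.language == 'sa':
            return swadesh_sa

print(Swadesh('sa').words()[0])  # अहम्
```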
 | I would love to work on this, @kylepjohnson. Just one question: should I create a new `swadesh.py`, or add the list to the existing `cltk/cltk/corpus/swadesh.py`? | 2018-02-09T00:43:21 |
cltk/cltk | 673 | cltk__cltk-673 | [
"628"
] | 4b165be3f765137dfbeaecbac58638a1c3254ea5 | diff --git a/cltk/corpus/swadesh.py b/cltk/corpus/swadesh.py
--- a/cltk/corpus/swadesh.py
+++ b/cltk/corpus/swadesh.py
@@ -3,21 +3,24 @@
__author__ = ['Patrick J. Burns <[email protected]>']
__license__ = 'MIT License. See LICENSE.'
-swadesh_la = ['ego', 'tū', 'is, ea, id', 'nōs', 'vōs', 'eī, iī, eae, ea', 'hic, haec, hoc', 'ille, illa, illud', 'hīc', 'illic, ibi', 'quis, quae', 'quid', 'ubi', 'cum', 'quōmodō', 'nōn, nē', 'omnēs, omnia', 'multī, multae, multa', 'aliquī, aliqua, aliquod', 'paucī, paucae, pauca', 'alter, alius', 'ūnus', 'duō', 'trēs', 'quattuor', 'quīnque', 'magnus', 'longus', 'lātus', 'crassus', 'gravis', 'parvus', 'brevis', 'angustus', 'gracilis', 'fēmina', 'vir', 'homō', 'puer', 'uxor, mulier', 'marītus', 'māter', 'pater', 'animal', 'piscis', 'avis', 'canis', 'pēdīculus', 'serpens', 'vermis', 'arbor', 'silva', 'hasta, pālus', 'fructus', 'sēmen', 'folium', 'rādix', 'cortex', 'flōs', 'herba', 'chorda', 'cutis', 'carō', 'sanguis', 'os', 'pinguāmen', 'ōvum', 'cornū', 'cauda', 'penna', 'pilus', 'caput', 'auris', 'oculus', 'nāsus, nāris', 'ōs', 'dens', 'lingua', 'unguis', 'pēs', 'crūs', 'genū', 'manus', 'āla', 'venter, abdōmen', 'viscera', 'cervix', 'dorsum', 'mamma', 'cor', 'iecur', 'bibere', 'edere', 'mordēre', 'sūgere', 'spuere', 'vomere', 'īnflāre', 'respīrāre', 'rīdēre', 'vidēre', 'audīre', 'scīre', 'cōgitāre, putāre, existimāre', 'olfacere', 'timēre', 'dormīre', 'vīvere', 'morī', 'necāre', 'luctārī', 'vēnārī', 'pellere', 'secāre', 'dīvidere', 'pungere', 'scabere', 'fodere', 'nāre, natāre', 'volāre', 'ambulāre', 'venīre', 'cubāre', 'sedēre', 'stāre', 'vertere', 'cadere', 'dare', 'tenēre', 'exprimere', 'fricāre', 'lavāre', 'tergēre', 'trahere', 'pellere', 'iacere', 'ligāre', 'cōnsuere', 'computāre, numerāre', 'dīcere', 'canere', 'ludere', 'fluctuāre', 'fluere', 'gelāre', 'augēre', 'sol', 'lūna', 'stella', 'aqua', 'pluvia', 'flūmen, fluvius, amnis', 'lacus', 'mare', 'sal', 'saxum, lapis, petra', 'harēna', 'pulvis', 'humus, terra, ager', 'nūbēs, nebula', 'cālīgō, nebula, tenebrae', 'caelum', 'ventus', 'nix', 'gelū', 'fūmus', 'ignis', 'cinis', 'ūrere, flammāre', 'via', 'mons', 'ruber, rūfus', 'viridis', 'croceus', 'albus, candidus', 'āter, niger', 'nox', 'dies', 'annus', 'calidus', 'frigidus', 'plēnus', 'novus', 'vetus', 'bonus', 'malus', 'putridus', 'immundus', 'rectus', 'rotundus', 'acūtus', 'hebes', 'suāvis', 'humidus, aqueus', 'siccus', 'rectus', 'propinquus, proximus', 'longus', 'dexter', 'laevus, sinister', 'ad, in', 'in', 'cum', 'et, -que', 'si', 'quod', 'nōmen']
-swadesh_gr = ['ἐγώ', 'σύ', 'αὐτός, οὗ, ὅς, ὁ, οὗτος', 'ἡμεῖς', 'ὑμεῖς', 'αὐτοί', 'ὅδε', 'ἐκεῖνος', 'ἔνθα, ἐνθάδε, ἐνταῦθα', 'ἐκεῖ', 'τίς', 'τί', 'ποῦ, πόθι', 'πότε, πῆμος', 'πῶς', 'οὐ, μή', 'πᾶς, ἅπᾱς', 'πολύς', 'τις', 'ὀλίγος, βαιός, παῦρος', 'ἄλλος, ἕτερος', 'εἷς', 'δύο', 'τρεῖς', 'τέσσαρες', 'πέντε', 'μέγας', 'μακρός', 'εὐρύς', 'πυκνός', 'βαρύς', 'μῑκρός', 'βραχύς', 'στενός', 'μανός', 'γυνή', 'ἀνήρ', 'ἄνθρωπος', 'τέκνον, παῖς, παιδίον', 'γυνή', 'ἀνήρ', 'μήτηρ', 'πατήρ', 'ζῷον', 'ἰχθύς', 'ὄρνις, πετεινόν', 'κύων', 'φθείρ', 'ὄφις', 'ἑρπετόν, σκώληξ, ἕλμινς', 'δένδρον', 'ὕλη', 'βακτηρία, ῥάβδος', 'καρπός', 'σπέρμα', 'φύλλον', 'ῥίζα', 'φλοιός', 'ἄνθος', 'χλόη', 'δεσμός, σχοινίον', 'δέρμα', 'κρέας', 'αἷμα', 'ὀστοῦν', 'δημός', 'ᾠόν', 'κέρας', 'οὐρά, κέρκος', 'πτερόν', 'θρίξ, κόμη', 'κεφαλή', 'οὖς', 'ὀφθαλμός', 'ῥίς', 'στόμα', 'ὀδούς', 'γλῶσσα', 'ὄνυξ', 'πούς', 'κῶλον, σκέλος', 'γόνυ', 'χείρ', 'πτέρυξ', 'γαστήρ, κοιλία', 'ἔντερα, σπλάγχνα', 'αὐχήν, τράχηλος', 'νῶτον', 'μαστός, στῆθος', 'καρδία', 'ἧπαρ', 'πίνω', 'ἐσθίω, ἔφαγον', 'δάκνω', 'σπάω', 'πτύω', 'ἐμέω', 'φυσάω', 'πνέω', 'γελάω', 'βλέπω, ὁράω, εἶδον', 'ἀκούω, ἀΐω', 'οἶδα, γιγνώσκω', 'νομίζω, δοκέω, νοέω, οἴομαι', 'ὀσφραίνομαι', 'φοβέομαι', 'καθεύδω, εὕδω, εὐνάζομαι, κοιμάομαι, ἰαύω', 'ζάω, βιόω, οἰκέω', 'ἀποθνῄσκω, θνῄσκω, τελευτάω, ὄλομαι', 'ἀποκτείνω, ἔπεφνον', 'μάχομαι', 'θηρεύω, θηράω, ἰχνεύω, κυνηγετέω, κυνηγέω, σεύω', 'τύπτω', 'τέμνω', 'σχίζω', 'κεντέω', 'κνάω', 'ὀρύσσω, σκᾰ́πτω', 'νέω, κολυμβάω', 'πέτομαι', 'περιπατέω, πατέω, στείχω, βαίνω, βαδίζω, πεζεύω, πορεύω', 'ἱκνέομαι, ἵκω, ἔρχομαι, εἶμι', 'κεῖμαι', 'καθίζω', 'ἵστημι', 'τρέπω', 'πίπτω', 'παρέχω, δίδωμι', 'ἔχω', 'πιέζω', 'τρίβω', 'λούω, πλύνω, νίπτω', 'ἀπομάσσω', 'ἕλκω', 'ὠθέω', 'ῥίπτω, βάλλω', 'δέω', 'ῥάπτω', 'ἀριθμέω', 'φημί, λέγω, ἐνέπω', 'ἀείδω', 'παίζω', 'νέω', 'ῥέω', 'πήγνυμαι', 'αὐξάνω', 'ἥλιος', 'σελήνη', 'ἀστήρ', 'ὕδωρ', 'ὑετός, βροχή', 'ποταμός', 'λίμνη', 'θάλασσα, πέλαγος, πόντος', 'ἅλς', 'λίθος', 'ἄμμος', 'κόνις', 'γῆ, χθών', 'νέφος', 'ὀμίχλη', 'οὐρανός', 'ἄνεμος', 'χιών', 'κρύσταλλος', 'καπνός', 'πῦρ', 'τέφρα', 'καίω', 'ὁδός', 'ἄκρα, ὄρος, βουνός', 'ἐρυθρός, πυρρός', 'χλωρός', 'ξανθός', 'λευκός', 'μέλας', 'νύξ', 'ἡμέρα, ἦμαρ', 'ἔτος', 'θερμός', 'ψυχρός', 'μεστός, πλήρης', 'νέος', 'παλαιός', 'ἀγαθός', 'κακός', 'σαπρός', 'θολερός', 'εὐθύς, ὀρθός', 'κυκλοτερής', 'τομός, ὀξύς', 'ἀμβλύς, βαρύς', 'λεῖος', 'ὑγρός', 'ξηρός', 'δίκαιος', 'ἐγγύς', 'μακράν', 'δεξιός', 'ἀριστερός, εὐώνυμος', 'ἐν', 'ἐν', 'μετά, σύν', 'καί, τε', 'εἰ', 'ὅτι', 'ὄνομα']
+swadesh_eng_old = ['ic, iċċ, ih', 'þū', 'hē', 'wē', 'ġē', 'hīe', 'þēs, þēos, þis', 'sē, sēo, þæt', 'hēr', 'þār, þāra, þǣr, þēr', 'hwā','hwā, hwæt', 'hwǣr', 'hwanne, hwænne, hwenne', 'hū', 'ne', 'eall', 'maniġ, feola, fela', 'sum', 'fēaw, lyt', 'ōþer', 'ān', 'twēġen, twā, tū', 'þrīe, þrēo', 'fēower', 'fīf', 'grēat, stōr', 'lang, long', 'wīd, brād', 'þicce', 'hefiġ', 'smæl', 'scort, sceort', 'eng, nearu', 'þynn', 'ides, cwēn, wīfmann', 'wer, guma', 'mann', 'ċild, bearn, umbor', 'wīf', 'bunda, banda, hūsbonda', 'mōdor', 'fæder', 'dēor', 'fisc', 'fugol', 'hund', 'lūs', 'snaca', 'wyrm', 'trēo, bēam', 'weald, fyrhþ', 'sticca', 'wæstm, blǣd, ofett', 'sǣd', 'blæd, lēaf', 'wyrt', 'rind', 'blǣd, blōstma', 'græs, gærs', 'rāp, līne, sāl', 'hȳd', 'flǣsc', 'blōd', 'bān', 'fǣtt', 'ǣġ','horn', 'steort, tæġl', 'feþer', 'hǣr, hēr', 'hēafod, hafola', 'ēare', 'ēaġe', 'nosu', 'mūþ', 'tōþ', 'tunge', 'fingernæġel', 'fōt', 'scanca', 'cnēo', 'hand', 'feþera', 'būc', 'þearm', 'heals, hnecca', 'hryċġ, bæc', 'brēost', 'heorte', 'lifer', 'drincan', 'etan', 'bītan', 'sūgan, sūcan', 'spittan, hrǣċan', 'spīwan', 'blāwan', 'ōþian, ēþian', 'hliehhan', 'sēon', 'hīeran', 'witan, cnāwan', 'þenċan', 'ēþian, stincan', 'andrǣdan', 'slǣpan', 'libban, lifian', 'steorfan', 'cwellan', 'feohtan', 'huntian', 'hittan, slēan', 'snīþan', 'splātan, clēofan', 'snǣsan, stingan, stician', 'screpan, clifrian, pliċġan, clāwian', 'grafan', 'swimman, flēotan', 'flēoġan', 'gangan, onsteppan', 'cuman', 'liċġan', 'sittan', 'standan', 'ċierran, hwierfan', 'feallan', 'ġiefan', 'healdan', 'þringan, cwȳsan', 'gnīdan', 'wascan', 'wīpian', 'dragan, pullian', 'scūfan, þyddan, hrindan, potian', 'weorpan', 'bindan, tīeġan', 'sīwian, sēowian', 'tellan', 'cweþan, seċġan', 'singan', 'lācan, pleġan', 'flēotan, flotian, floterian', 'flōwan', 'frēosan', 'swellan', 'sōl, sunne', 'mōna', 'steorra, tungol', 'wæter', 'reġn', 'ēa, flōd, strēam', 'mere, lacu', 'sǣ', 'sealt', 'stān', 'sand', 'dūst, dust', 'eorþe', 'wolcen', 'mist', 'rodor, lyft', 'wind', 'snāw', 'īs', 'rēc, smoca', 'fȳr', 'æsc', 'beornan, biernan, bærnan, ǣlan', 'weġ, pæþ', 'beorg', 'rēad', 'grēne', 'ġeolu', 'hwīt', 'sweart, blæc', 'neaht, niht', 'dōgor, dæġ', 'ġēar', 'wearm', 'ceald','full', 'nēowe, nīwe', 'gamol, eald', 'gōd', 'yfel', 'fūl', 'ādeliht, sol', 'ġerād, ġereclīc', 'hwyrflede, seonuwealt', 'scearp', 'dol, dwæs','slīc, slieht, smēþe, smōþ','wǣt','sēar, drȳġe', 'riht','nēah','wīd, feor','reht, riht','winstre','on, æt','in', 'mid', 'and', 'ġif', 'forþon', 'nama']
-swadesh_sa = ['अहम्' , 'त्वम्', 'स', 'वयम्, नस्', 'यूयम्, वस्', 'ते', 'इदम्', 'तत्', 'अत्र', 'तत्र', 'क', 'किम्', 'कुत्र', 'कदा', 'कथम्', 'न', 'सर्व', 'बहु', 'किञ्चिद्', 'अल्प', 'अन्य', 'एक', 'द्वि', 'त्रि', 'चतुर्', 'पञ्चन्', 'महत्', 'दीर्घ', 'उरु', 'घन', 'गुरु', 'अल्प', 'ह्रस्व', 'अंहु', 'तनु', 'स्त्री', 'पुरुष, नर', 'मनुष्य, मानव', 'बाल, शिशु', 'पत्नी, भार्या', 'पति', 'मातृ', 'पितृ', 'पशु', 'मत्स्य', 'वि, पक्षिन्', 'श्वन्', 'यूका', 'सर्प', 'कृमि', 'वृक्ष, तरु', 'वन', 'दण्ड', 'फल', 'बीज', 'पत्त्र', 'मूल', 'त्वच्', 'पुष्प', 'तृण', 'रज्जु', 'चर्मन्, त्वच्', 'मांस', 'रक्त, असृज्', 'अस्थि', 'पीवस्, मेदस्', 'अण्ड', 'शृङ्ग', 'पुच्छ', 'पर्ण', 'केश', 'शिरस्', 'कर्ण', 'अक्षि', 'नासा', 'वक्त्र, मुख', 'दन्त', 'जिह्वा', 'नख', 'पद', 'जङ्घ', 'जानु', 'हस्त, पाणि', 'पक्ष', 'उदर', 'अन्त्र, आन्त्र, गुद', 'गल, ग्रीवा', 'पृष्ठ', 'स्तन', 'हृदय', 'यकृत्', 'पिबति', 'खादति, अत्ति', 'दशति', 'धयति', 'ष्ठीवति', 'वमति', 'वाति', 'अनिति', 'स्मयते, हसति', 'पश्यति, √दृश्', 'शृणोति', 'जानाति', 'मन्यते, चिन्तयति', 'जिघ्रति', 'बिभेति, भयते', 'स्वपिति', 'जीवति', 'म्रियते', 'हन्ति', 'युध्यते', 'वेति', 'हन्ति, ताडयति', 'कृन्तति', 'भिनत्ति', 'विधति', 'लिखति', 'खनति', 'प्लवते', 'पतति', 'एति, गच्छति, चरति', 'आगच्छति', 'शेते', 'सीदति', 'तिष्ठति', 'वर्तते', 'पद्यते', 'ददाति', 'धरति', 'मृद्नाति', 'घर्षति', 'क्षालयति', 'मार्ष्टि', 'कर्षति', 'नुदति', 'क्षिपति', 'बध्नाति, बन्धति', 'सीव्यति', 'गणयति, कलते', 'वक्ति', 'गायति', 'दीव्यति', 'प्लवते', 'सरति, क्षरति', 'शीयते', 'श्वयति', 'सूर्य, रवि, सूर, भास्कर', 'मास, चन्द्रमस्, चन्द्र', 'नक्षत्र, स्तृ, तारा', 'जल, अप्, पानीय, वारि, उदन्, तोज', 'वर्ष', 'नदी', 'सरस्', 'समुद्र', 'लवण', 'अश्मन्', 'पांसु, शिकता', 'रेणु', 'क्षम्, पृथ्वी', 'नभस्, मेघ', 'मिह्', 'आकाश', 'वायु, वात', 'हिम, तुषार, तुहिन', 'हिम', 'धूम', 'अग्नि', 'आस', 'दहति', 'पथ, अध्वन्, मार्ग', 'गिरि, पर्वत', 'रक्त, रोहित', 'हरित्, हरित, पालाश, पलाश', 'पीत, पीतल', 'श्वेत', 'कृष्ण', 'रात्रि, नक्ति, क्षप्, रजनी', 'दिन, अहर्, दिवस', 'वर्ष, संवत्सर', 'तप्त', 'शीत', 'पूर्ण', 'नव, नूतन', 'जीर्ण, वृद्ध, पुरातन', 'वसु, भद्र', 'पाप, दुष्ट', 'पूति', 'मलिन, समल', 'ऋजु, साधु', 'वृत्त, वर्तुल', 'तीक्ष्ण', 'कुण्ठ', 'श्लक्ष्ण, स्निग्ध', 'आर्द्र, क्लिन्न', 'शुष्क', 'शुद्ध, सत्य', 'नेद, प्रति', 'दूर', 'दक्षिण', 'सव्य', 'काश्यां', 'अंतरे, मध्ये', 'सह', 'च', 'यदि', 'हि', 'नामन्']
+swadesh_fr_old = ['jo, jou, je, ge', 'tu', 'il', 'nos, nous', 'vos, vous', 'il, eles', 'cist, cest, cestui', 'ci', 'la', 'qui, cui, ki', 'que, ke', 'u, ou', 'quant', 'coment, comant, comment', 'ne', 'tot, tut', 'mult, molt, mout, mot, mut', 'poi, po', 'autre, altre', 'un', 'deus', 'trois, troi', 'quatre', 'cinc', 'grant', 'lonc', 'lé', 'espés', 'pesant', 'petit', 'cort', 'estroit, estreit', 'meigre', 'dame, damoisele', 'hom, home, om, homme', 'enfant', 'feme, fame, femme,', 'mari', 'mere', 'pere', 'beste', 'poisson', 'oisel', 'chien, chen', 'püil', 'serpent', 'verm', 'arbre', 'forest', 'baston', 'fruit, fruict', 'fueille, foille', 'racine', 'escorce', 'flor, flur, fleur', 'erbe, herbe', 'corde', 'pel', 'char', 'sanc', 'os', 'grese, graisse', 'oef, uef', 'corne', 'cue', 'plume', 'chevol, cheveu', 'chief, chef', 'oreille', 'oel, oeil, ueil', 'nés', 'boche', 'dent', 'langue, lingue', 'ungle', 'pié', 'jambe, gambe', 'genol, genoil', 'main', 'ele', 'ventre, vantre', 'col', 'dos', 'cuer, coer', 'foie', 'boivre', 'mangier, mengier', 'mordre', 'escupir', 'vomir', 'respirer', 'rire', 'veoir, veir, veer', 'oïr', 'savoir, conoistre', 'penser', 'creindre, redoubter', 'dormir', 'vivre', 'morir', 'ocire, ocirre', '(se) batre', 'chacier', 'batre', 'colper, coper, couper', 'brisier, briser', 'voler', 'se pasmer', 'venir', 'gesir', 'retorner', 'chair', 'doner', 'tenir', 'laver', 'traire, treire', 'jeter, geter', 'lier', 'cosdre', 'conter', 'dire', 'chanter, canter', 'joer, juer', 'floter', 'geler', 'enfler', 'soleil, soloil', 'lune', 'estoile', 'iaue, eve', 'pluie, plovoir', 'riviere, flueve', 'lac', 'mer', 'sel', 'pierre, piere', 'sablon', 'terre, tere', 'ciel, cel', 'vent, vant', 'fum', 'fu, feu', 'cendre', 'ardre, ardoir, brusler', 'chemin, voie, rote', 'montaigne', 'roge', 'vert', 'jaune', 'blanc', 'noir', 'noit, nuit', 'jor, jur', 'an', 'chaut, chaloir', 'froit, froidure', 'plain, plein', 'novel', 'viel, ancien', 'bon', 'malvais, mauvais', 'sale', 'reont, ront', 'trenchant', 'sec', 'bon, juste', 'loing', 'destre', 'senestre', 'a, ad', 'en', 'avoec, avuec', 'et, e', 'se, si', 'kar, quar, qar', 'non, nom, num, nun']
-swadesh_txb = ['ñäś', 'tuwe', 'su', 'wes', 'yes', 'cey', 'se', 'su, samp', 'tane', 'tane, omp', 'kᵤse', 'kᵤse', 'ente', 'ente', 'mäkte', 'mā', 'poñc', 'māka', 'ṣemi', 'totka', 'allek', 'ṣe', 'wi', 'trey', 'śtwer', 'piś', 'orotstse', 'pärkare', 'aurtstse', '', 'kramartse', 'lykaśke, totka', '', '', '', 'klyiye, śana', 'eṅkwe', 'śaumo', 'śamaśke', 'śana', 'petso', 'mācer', 'pācer', 'luwo', 'laks', 'salamo luwo', 'ku', 'pärśeriñ', 'arṣāklo, auk', 'yel', 'stām', 'wartto, karāś', 'śakātai', 'oko', 'sārm, śäktālye', 'pilta', 'witsako', 'enmetre', 'pyāpyo', 'atiyai', '', 'ewe, yetse', 'misa', 'yasar', 'āy, āsta pl', 'ṣalype', '', 'krorīyai', 'pako', 'paruwa', 'matsi', 'āśce', 'klautso', 'ek', 'meli', 'koyṃ', 'keme', 'kantwo', '', 'paiyye', 'ckāckai', 'keni', 'ṣar', '', 'kātso', 'kātso', 'kor', 'sark', 'päścane', 'arañce', 'wästarye', 'yokäṃ', 'śuwaṃ', '', '', 'pitke', 'aṅkaiṃ', 'pinaṣṣnäṃ', 'anāṣṣäṃ, satāṣṣäṃ', 'ker-', 'lkāṣṣäṃ', 'klyauṣäṃ', 'aiśtär, kärsanaṃ', 'pälskanaṃ', 'warṣṣäṃ', 'prāskaṃ', 'kläntsaṃ', 'śaiṃ', 'sruketär', 'kauṣäṃ', 'witāre', 'śerītsi', 'karnäṣṣäṃ', 'karsnaṃ, latkanaṃ', 'kautanaṃ', 'tsopäṃ', '', 'rapanaṃ', 'nāṣṣäṃ', 'pluṣäṃ', 'yaṃ', 'känmaṣṣäṃ', 'lyaśäṃ', 'ṣamäṃ, āṣṣäṃ', 'kaltär', 'kluttaṅktär, sporttotär', 'kloyotär', 'aiṣṣäṃ', '', 'klupnātär, nuskaṣṣäṃ', 'lyuwetär, kantanatär', 'laikanatär', 'lyyāstär', 'slaṅktär', 'nätkanaṃ', 'karṣṣäṃ, saläṣṣäṃ', 'śanmästär, kärkaṣṣäṃ', '', 'ṣäṃṣtär', 'weṣṣäṃ', 'piyaṃ', 'kāñmäṃ', 'pluṣäṃ', 'reṣṣäṃ', '', 'staukkanatär', 'kauṃ', 'meñe', 'ścirye', 'war', 'swese', 'cake', 'lyam', 'samudtär', 'salyiye', 'kärweñe', 'warañc', 'tweye, taur', 'keṃ', 'tarkär', '', 'iprer', 'yente', 'śiñcatstse', '', '', 'puwar', 'taur, tweye', 'tsakṣtär,pälketär', 'ytārye', 'ṣale', 'ratre', 'motartstse', 'tute', 'ārkwi', 'erkent-', 'yṣiye', 'kauṃ', 'pikul', 'emalle', 'krośce', 'ite', 'ñuwe', 'ktsaitstse', 'kartse', 'yolo, pakwāre', 'āmpau', 'sal, kraketstse', '', '', 'mātre, akwatse', 'mālle', 'ṣmare', 'karītstse', 'asāre', '', 'akartte, ysape, etsuwai', 'lau, lauke', 'saiwai', 'śwālyai', '-ne', '-ne', 'śle', 'ṣp', 'krui, ente', 'kuce, mäkte', 'ñem']
+swadesh_gr = ['ἐγώ', 'σύ', 'αὐτός, οὗ, ὅς, ὁ, οὗτος', 'ἡμεῖς', 'ὑμεῖς', 'αὐτοί', 'ὅδε', 'ἐκεῖνος', 'ἔνθα, ἐνθάδε, ἐνταῦθα', 'ἐκεῖ', 'τίς', 'τί', 'ποῦ, πόθι', 'πότε, πῆμος', 'πῶς', 'οὐ, μή', 'πᾶς, ἅπᾱς', 'πολύς', 'τις', 'ὀλίγος, βαιός, παῦρος', 'ἄλλος, ἕτερος', 'εἷς', 'δύο', 'τρεῖς', 'τέσσαρες', 'πέντε', 'μέγας', 'μακρός', 'εὐρύς', 'πυκνός', 'βαρύς', 'μῑκρός', 'βραχύς', 'στενός', 'μανός', 'γυνή', 'ἀνήρ', 'ἄνθρωπος', 'τέκνον, παῖς, παιδίον', 'γυνή', 'ἀνήρ', 'μήτηρ', 'πατήρ', 'ζῷον', 'ἰχθύς', 'ὄρνις, πετεινόν', 'κύων', 'φθείρ', 'ὄφις', 'ἑρπετόν, σκώληξ, ἕλμινς', 'δένδρον', 'ὕλη', 'βακτηρία, ῥάβδος', 'καρπός', 'σπέρμα', 'φύλλον', 'ῥίζα', 'φλοιός', 'ἄνθος', 'χλόη', 'δεσμός, σχοινίον', 'δέρμα', 'κρέας', 'αἷμα', 'ὀστοῦν', 'δημός', 'ᾠόν', 'κέρας', 'οὐρά, κέρκος', 'πτερόν', 'θρίξ, κόμη', 'κεφαλή', 'οὖς', 'ὀφθαλμός', 'ῥίς', 'στόμα', 'ὀδούς', 'γλῶσσα', 'ὄνυξ', 'πούς', 'κῶλον, σκέλος', 'γόνυ', 'χείρ', 'πτέρυξ', 'γαστήρ, κοιλία', 'ἔντερα, σπλάγχνα', 'αὐχήν, τράχηλος', 'νῶτον', 'μαστός, στῆθος', 'καρδία', 'ἧπαρ', 'πίνω', 'ἐσθίω, ἔφαγον', 'δάκνω', 'σπάω', 'πτύω', 'ἐμέω', 'φυσάω', 'πνέω', 'γελάω', 'βλέπω, ὁράω, εἶδον', 'ἀκούω, ἀΐω', 'οἶδα, γιγνώσκω', 'νομίζω, δοκέω, νοέω, οἴομαι', 'ὀσφραίνομαι', 'φοβέομαι', 'καθεύδω, εὕδω, εὐνάζομαι, κοιμάομαι, ἰαύω', 'ζάω, βιόω, οἰκέω', 'ἀποθνῄσκω, θνῄσκω, τελευτάω, ὄλομαι', 'ἀποκτείνω, ἔπεφνον', 'μάχομαι', 'θηρεύω, θηράω, ἰχνεύω, κυνηγετέω, κυνηγέω, σεύω', 'τύπτω', 'τέμνω', 'σχίζω', 'κεντέω', 'κνάω', 'ὀρύσσω, σκᾰ́πτω', 'νέω, κολυμβάω', 'πέτομαι', 'περιπατέω, πατέω, στείχω, βαίνω, βαδίζω, πεζεύω, πορεύω', 'ἱκνέομαι, ἵκω, ἔρχομαι, εἶμι', 'κεῖμαι', 'καθίζω', 'ἵστημι', 'τρέπω', 'πίπτω', 'παρέχω, δίδωμι', 'ἔχω', 'πιέζω', 'τρίβω', 'λούω, πλύνω, νίπτω', 'ἀπομάσσω', 'ἕλκω', 'ὠθέω', 'ῥίπτω, βάλλω', 'δέω', 'ῥάπτω', 'ἀριθμέω', 'φημί, λέγω, ἐνέπω', 'ἀείδω', 'παίζω', 'νέω', 'ῥέω', 'πήγνυμαι', 'αὐξάνω', 'ἥλιος', 'σελήνη', 'ἀστήρ', 'ὕδωρ', 'ὑετός, βροχή', 'ποταμός', 'λίμνη', 'θάλασσα, πέλαγος, πόντος', 'ἅλς', 'λίθος', 'ἄμμος', 'κόνις', 'γῆ, χθών', 'νέφος', 'ὀμίχλη', 'οὐρανός', 'ἄνεμος', 'χιών', 'κρύσταλλος', 'καπνός', 'πῦρ', 'τέφρα', 'καίω', 'ὁδός', 'ἄκρα, ὄρος, βουνός', 'ἐρυθρός, πυρρός', 'χλωρός', 'ξανθός', 'λευκός', 'μέλας', 'νύξ', 'ἡμέρα, ἦμαρ', 'ἔτος', 'θερμός', 'ψυχρός', 'μεστός, πλήρης', 'νέος', 'παλαιός', 'ἀγαθός', 'κακός', 'σαπρός', 'θολερός', 'εὐθύς, ὀρθός', 'κυκλοτερής', 'τομός, ὀξύς', 'ἀμβλύς, βαρύς', 'λεῖος', 'ὑγρός', 'ξηρός', 'δίκαιος', 'ἐγγύς', 'μακράν', 'δεξιός', 'ἀριστερός, εὐώνυμος', 'ἐν', 'ἐν', 'μετά, σύν', 'καί, τε', 'εἰ', 'ὅτι', 'ὄνομα']
-swadesh_pt_old = ['eu', 'tu', 'ele', 'nos', 'vos', 'eles', 'esto, aquesto', 'aquelo', 'aqui', 'ali', 'quen', 'que', 'u', 'quando', 'como', 'non', 'todo', 'muito', 'algũus', 'pouco', 'outro', 'un, ũu', 'dous', 'tres', 'quatro', 'cinco', 'grande, gran', 'longo', 'ancho', 'grosso', 'pesado', 'pequeno', 'curto', 'estreito', 'magro', 'moller, dona', 'ome', 'ome, pessõa', 'infante, meninno, creatura', 'moller', 'marido', 'madre, mãi', 'padre, pai', 'besta, bestia, bescha', 'peixe', 'ave', 'can', 'peollo', 'coobra', 'vermen', 'arvor', 'furesta, mata, monte', 'baston, pao', 'fruita, fruito', 'semente', 'folla', 'raiz', 'cortiça', 'fror, flor', 'erva', 'corda', 'pele', 'carne', 'sangui, sangue', 'osso', 'gordura', 'ovo', 'corno', 'rabo', 'pena', 'cabelo', 'cabeça', 'orella', 'ollo', 'nariz', 'boca', 'dente', 'lingua', 'unna, unlla', 'pee, pe', 'perna', 'gẽollo', 'mão', 'aa', 'ventre', 'tripas', 'colo', 'costas', 'peito, sẽo', 'coraçon', 'figado', 'bever', 'comer', 'morder', 'mamar', 'cospir', '', 'soprar', '', 'riir', 'veer', 'ouvir, oir, ascuitar', 'saber', 'pensar', 'cheirar', 'temer', 'dormir', 'viver', 'morrer', 'matar', 'pelejar', 'caçar', 'bater', 'cortar, partir', '', 'acuitelar', 'rascar', 'cavar', 'nadar', 'voar', 'andar', 'vĩir', 'jazer, deitar', 'sentar', 'levantar', '', 'caer', 'dar', 'tẽer', 'apertar', '', 'lavar', 'terger, enxugar', 'puxar', 'empuxar', 'lançar', 'atar', 'coser', 'contar', 'contar, dizer, falar', 'cantar', 'jogar', 'boiar', 'correr', 'gelar, *gear', 'inchar', 'sol', 'lũa', 'estrela', 'agua', 'chuvia', 'rio', 'lago', 'mar', 'sal', 'pedra', 'arẽa', 'poo', 'terra', 'nuve', 'nevoeiro', 'ceo', 'vento', 'neve', 'geo', 'fumo, fumaz', 'fogo', 'cĩisa', 'queimar, arder', 'caminno, via', 'montanna, monte', 'vermello', 'verde', 'amarelo', 'branco', 'negro', 'noite', 'dia', 'ano', 'caente', 'frio', 'chẽo', 'novo', 'vello, antigo', 'bon, bõo', 'mal, mao', 'podre', 'lixoso', 'estreito', 'redondo', 'amoado', 'romo', 'chão', 'mollado', 'seco', 'reito, dereito', 'preto', 'longe', 'dereita', 'sẽestra', 'a', 'en', 'con', 'e', 'se', 'porque', 'nome']
+swadesh_hi = ['मैं', 'तू', 'वह', 'हम', 'तुम', 'वे', 'यह', 'वह', 'यहाँ', 'वहाँ', 'कौन', 'क्या', 'कहाँ', 'कब', 'कैसा', 'नहीं', 'सब', 'बहुत', 'कुछ', 'थोड़ा', 'दूसरा', 'एक', 'दो', 'तीन', 'चार', 'पाँच', 'बड़ा', 'लम्बा', 'चौड़ा', 'गाढ़ा', 'भारी', 'छोटा', 'छोटा', 'तंग', 'पतला', 'औरत', 'आदमी', 'इंसान', 'बच्चा', 'पत्नी', 'पति', 'माता', 'पिता', 'जानवर', 'मछली', 'चिड़िया', 'कुत्ता', 'जूँ', 'साँप', 'कीड़ा', 'पेड़', 'जंगल', 'डण्डा', 'फल', 'बीज', 'पत्ता', 'जड़', 'छाल', 'फूल', 'घास', 'रस्सी', 'त्वचा', 'माँस', 'ख़ून', 'हड्डी', 'चरबी', 'अंडा', 'सींग', 'पूँछ', 'पंख', 'बाल', 'सर', 'कान', 'आँख', 'नाक', 'मुँह', 'दाँत', 'जीभ', 'नाख़ुन', 'पैर', 'टांग', 'घुटना', 'हाथ', 'पंख', 'पेट', 'अंतड़ी', 'गरदन', 'पीठ', 'छाती', 'दिल', 'जिगर', 'पीना', 'खाना', 'काटना', 'चूसना', 'थूकना', 'उल्टी करना', 'फूँक मारना', 'साँस लेना', 'हँसना', 'देखना', 'सुनना', 'जानना', 'सोचना', 'सूंघना', '(से) डरना ((se) ḍarnā', 'सोना', 'जीना', 'मरना', 'मारना', 'लड़ना', 'शिकार करना', 'मारना', 'काटना', 'बंटना', 'भोंकना', 'खरोंचना', 'खोदना', 'तैरना', 'उड़ना', 'चलना', 'आना', 'लेटना', 'बैठना', 'खड़ा होना', 'मुड़ना', 'गिरना', 'देना', 'पकड़ना', 'घुसा देना', 'मलना', 'धोना', 'पोंछना', 'खींचना', 'धक्का देना', 'फेंकना', 'बाँधना', 'सीना', 'गिनना', 'कहना', 'गाना', 'खेलना', 'तैरना', 'बहना', 'जमना', 'सूजना', 'सूरज', 'चांद', 'तारा', 'पानी', 'बारिश', 'नदी', 'झील', 'समन्दर', 'नमक', 'पत्थर', 'रेत', 'धूल', 'धरती', 'बादल', 'धुंध', 'आसमान', 'हवा', 'बर्फ़', 'बर्फ़', 'धुआँ', 'आग', 'राख', 'जलना', 'सड़क', 'पहाड़', 'लाल', 'हरा', 'पीला', 'सफ़ेद', 'काला', 'रात', 'दिन', 'साल', 'गर्म', 'ठंडा', 'पूरा', 'नया', 'पुराना', 'अच्छा', 'बुरा', 'सड़ा', 'गन्दा', 'सीधा', 'गोल', 'तीखा', 'कुंद', 'चिकना', 'गीला', 'सूखा', 'सही', 'नज़दीक', 'दूर', 'दायाँ', 'बायाँ', 'पे', 'में', 'के साथ', 'और', 'अगर', 'क्योंकि', 'नाम']
-swadesh_eng_old = ['ic, iċċ, ih', 'þū', 'hē', 'wē', 'ġē', 'hīe', 'þēs, þēos, þis', 'sē, sēo, þæt', 'hēr', 'þār, þāra, þǣr, þēr', 'hwā','hwā, hwæt', 'hwǣr', 'hwanne, hwænne, hwenne', 'hū', 'ne', 'eall', 'maniġ, feola, fela', 'sum', 'fēaw, lyt', 'ōþer', 'ān', 'twēġen, twā, tū', 'þrīe, þrēo', 'fēower', 'fīf', 'grēat, stōr', 'lang, long', 'wīd, brād', 'þicce', 'hefiġ', 'smæl', 'scort, sceort', 'eng, nearu', 'þynn', 'ides, cwēn, wīfmann', 'wer, guma', 'mann', 'ċild, bearn, umbor', 'wīf', 'bunda, banda, hūsbonda', 'mōdor', 'fæder', 'dēor', 'fisc', 'fugol', 'hund', 'lūs', 'snaca', 'wyrm', 'trēo, bēam', 'weald, fyrhþ', 'sticca', 'wæstm, blǣd, ofett', 'sǣd', 'blæd, lēaf', 'wyrt', 'rind', 'blǣd, blōstma', 'græs, gærs', 'rāp, līne, sāl', 'hȳd', 'flǣsc', 'blōd', 'bān', 'fǣtt', 'ǣġ','horn', 'steort, tæġl', 'feþer', 'hǣr, hēr', 'hēafod, hafola', 'ēare', 'ēaġe', 'nosu', 'mūþ', 'tōþ', 'tunge', 'fingernæġel', 'fōt', 'scanca', 'cnēo', 'hand', 'feþera', 'būc', 'þearm', 'heals, hnecca', 'hryċġ, bæc', 'brēost', 'heorte', 'lifer', 'drincan', 'etan', 'bītan', 'sūgan, sūcan', 'spittan, hrǣċan', 'spīwan', 'blāwan', 'ōþian, ēþian', 'hliehhan', 'sēon', 'hīeran', 'witan, cnāwan', 'þenċan', 'ēþian, stincan', 'andrǣdan', 'slǣpan', 'libban, lifian', 'steorfan', 'cwellan', 'feohtan', 'huntian', 'hittan, slēan', 'snīþan', 'splātan, clēofan', 'snǣsan, stingan, stician', 'screpan, clifrian, pliċġan, clāwian', 'grafan', 'swimman, flēotan', 'flēoġan', 'gangan, onsteppan', 'cuman', 'liċġan', 'sittan', 'standan', 'ċierran, hwierfan', 'feallan', 'ġiefan', 'healdan', 'þringan, cwȳsan', 'gnīdan', 'wascan', 'wīpian', 'dragan, pullian', 'scūfan, þyddan, hrindan, potian', 'weorpan', 'bindan, tīeġan', 'sīwian, sēowian', 'tellan', 'cweþan, seċġan', 'singan', 'lācan, pleġan', 'flēotan, flotian, floterian', 'flōwan', 'frēosan', 'swellan', 'sōl, sunne', 'mōna', 'steorra, tungol', 'wæter', 'reġn', 'ēa, flōd, strēam', 'mere, lacu', 'sǣ', 'sealt', 'stān', 'sand', 'dūst, dust', 'eorþe', 'wolcen', 'mist', 'rodor, lyft', 'wind', 'snāw', 'īs', 'rēc, smoca', 'fȳr', 'æsc', 'beornan, biernan, bærnan, ǣlan', 'weġ, pæþ', 'beorg', 'rēad', 'grēne', 'ġeolu', 'hwīt', 'sweart, blæc', 'neaht, niht', 'dōgor, dæġ', 'ġēar', 'wearm', 'ceald','full', 'nēowe, nīwe', 'gamol, eald', 'gōd', 'yfel', 'fūl', 'ādeliht, sol', 'ġerād, ġereclīc', 'hwyrflede, seonuwealt', 'scearp', 'dol, dwæs','slīc, slieht, smēþe, smōþ','wǣt','sēar, drȳġe', 'riht','nēah','wīd, feor','reht, riht','winstre','on, æt','in', 'mid', 'and', 'ġif', 'forþon', 'nama']
+swadesh_la = ['ego', 'tū', 'is, ea, id', 'nōs', 'vōs', 'eī, iī, eae, ea', 'hic, haec, hoc', 'ille, illa, illud', 'hīc', 'illic, ibi', 'quis, quae', 'quid', 'ubi', 'cum', 'quōmodō', 'nōn, nē', 'omnēs, omnia', 'multī, multae, multa', 'aliquī, aliqua, aliquod', 'paucī, paucae, pauca', 'alter, alius', 'ūnus', 'duō', 'trēs', 'quattuor', 'quīnque', 'magnus', 'longus', 'lātus', 'crassus', 'gravis', 'parvus', 'brevis', 'angustus', 'gracilis', 'fēmina', 'vir', 'homō', 'puer', 'uxor, mulier', 'marītus', 'māter', 'pater', 'animal', 'piscis', 'avis', 'canis', 'pēdīculus', 'serpens', 'vermis', 'arbor', 'silva', 'hasta, pālus', 'fructus', 'sēmen', 'folium', 'rādix', 'cortex', 'flōs', 'herba', 'chorda', 'cutis', 'carō', 'sanguis', 'os', 'pinguāmen', 'ōvum', 'cornū', 'cauda', 'penna', 'pilus', 'caput', 'auris', 'oculus', 'nāsus, nāris', 'ōs', 'dens', 'lingua', 'unguis', 'pēs', 'crūs', 'genū', 'manus', 'āla', 'venter, abdōmen', 'viscera', 'cervix', 'dorsum', 'mamma', 'cor', 'iecur', 'bibere', 'edere', 'mordēre', 'sūgere', 'spuere', 'vomere', 'īnflāre', 'respīrāre', 'rīdēre', 'vidēre', 'audīre', 'scīre', 'cōgitāre, putāre, existimāre', 'olfacere', 'timēre', 'dormīre', 'vīvere', 'morī', 'necāre', 'luctārī', 'vēnārī', 'pellere', 'secāre', 'dīvidere', 'pungere', 'scabere', 'fodere', 'nāre, natāre', 'volāre', 'ambulāre', 'venīre', 'cubāre', 'sedēre', 'stāre', 'vertere', 'cadere', 'dare', 'tenēre', 'exprimere', 'fricāre', 'lavāre', 'tergēre', 'trahere', 'pellere', 'iacere', 'ligāre', 'cōnsuere', 'computāre, numerāre', 'dīcere', 'canere', 'ludere', 'fluctuāre', 'fluere', 'gelāre', 'augēre', 'sol', 'lūna', 'stella', 'aqua', 'pluvia', 'flūmen, fluvius, amnis', 'lacus', 'mare', 'sal', 'saxum, lapis, petra', 'harēna', 'pulvis', 'humus, terra, ager', 'nūbēs, nebula', 'cālīgō, nebula, tenebrae', 'caelum', 'ventus', 'nix', 'gelū', 'fūmus', 'ignis', 'cinis', 'ūrere, flammāre', 'via', 'mons', 'ruber, rūfus', 'viridis', 'croceus', 'albus, candidus', 'āter, niger', 'nox', 'dies', 'annus', 'calidus', 'frigidus', 'plēnus', 'novus', 'vetus', 'bonus', 'malus', 'putridus', 'immundus', 'rectus', 'rotundus', 'acūtus', 'hebes', 'suāvis', 'humidus, aqueus', 'siccus', 'rectus', 'propinquus, proximus', 'longus', 'dexter', 'laevus, sinister', 'ad, in', 'in', 'cum', 'et, -que', 'si', 'quod', 'nōmen']
swadesh_old_norse = ["ek", "þú", "hann", "vér", "þér", "þeir", "sjá, þessi", "sá", "hér", "þar", "hvar", "hvat", "hvar", "hvenær", "hvé", "eigi", "allr", "margr", "nǫkkurr", "fár", "annarr", "einn", "tveir", "þrír", "fjórir", "fimm", "stórr", "langr", "breiðr", "þykkr", "þungr", "lítill", "stuttr", "mjór", "þunnr", "kona", "karl", "maðr", "barn", "kona", "bóndi", 'móðir', "faðir", "dýrr", "fiskr", "fugl", "hundr", "lús", "snókr", "ormr", "tré", "skógr", "stafr", "ávǫxtr", "fræ", "lauf", "rót", "bǫrkr", "blóm", "gras", "reip", "húð", "kjǫt", "blóð", "bein", "fita", "egg", "horn", "hali", "fjǫðr", "hár", "hǫfuð", "eyra", "auga", "nef", "munnr", "tǫnn", "tunga", "nagl", "fótr", "leggr", "kné", "hǫnd", "vængr", "magi", "iinyfli", "hals" , "bak", "brjóst", "hjarta", "lifr", "drekka", "eta", "bíta", "súga", "spýta", ", hrækja", None, "blása", "anda", "hlæja", "sjá", "heyra", "vita", "þýkkja", "þefa", "ugga", "sofa", "lifa", "deyja", "drepa", "hals", "bak", "berja", "skera", "kljúfa""stinga", "klóra", "grafa", "synda", "fljúga", "ganga", "koma", "liggja", "sitja", "standa", "snúa", "falla", "gefa", "halda", "kreista", "gnúa","þvá", "þurka", "draga", "ýta", "kasta", "kasta", "binda", "sauma", "telja", "segja", "syngja", "leika", "flóta", "streyma", "frjósa", "þrútna", "sól", "tungl", "stjarna", "vatn", "regn", "á", "vatn", "hav", "salt", "steinn", "sandr", "ryk", "jörð", "ský", "þoka", "himinn", "vindr", "snjór", "íss", "reykr", "ild", "eldr", "aska", "brenna", "vegr", "fjall", "rauðr", "grœnn", "gulr", "hvítr", "svartr", "nótt", "dagr", "ár", "heitr", "kaldr", "fullr", "nýr", "gamall", "góðr", "illr", "rottin", "skitinn", "beinn", "kringlóttr", "beittr", None, "sleipr", "blautr", " þurr", "réttr", "nálægr", "langr", "hœgr", "vinstri", "hjá","í", "með", "ok", "ef", "því at", "nafn"] # pylint: disable=line-too-long
-swadesh_fr_old = ['jo, jou, je, ge', 'tu', 'il', 'nos, nous', 'vos, vous', 'il, eles', 'cist, cest, cestui', 'ci', 'la', 'qui, cui, ki', 'que, ke', 'u, ou', 'quant', 'coment, comant, comment', 'ne', 'tot, tut', 'mult, molt, mout, mot, mut', 'poi, po', 'autre, altre', 'un', 'deus', 'trois, troi', 'quatre', 'cinc', 'grant', 'lonc', 'lé', 'espés', 'pesant', 'petit', 'cort', 'estroit, estreit', 'meigre', 'dame, damoisele', 'hom, home, om, homme', 'enfant', 'feme, fame, femme,', 'mari', 'mere', 'pere', 'beste', 'poisson', 'oisel', 'chien, chen', 'püil', 'serpent', 'verm', 'arbre', 'forest', 'baston', 'fruit, fruict', 'fueille, foille', 'racine', 'escorce', 'flor, flur, fleur', 'erbe, herbe', 'corde', 'pel', 'char', 'sanc', 'os', 'grese, graisse', 'oef, uef', 'corne', 'cue', 'plume', 'chevol, cheveu', 'chief, chef', 'oreille', 'oel, oeil, ueil', 'nés', 'boche', 'dent', 'langue, lingue', 'ungle', 'pié', 'jambe, gambe', 'genol, genoil', 'main', 'ele', 'ventre, vantre', 'col', 'dos', 'cuer, coer', 'foie', 'boivre', 'mangier, mengier', 'mordre', 'escupir', 'vomir', 'respirer', 'rire', 'veoir, veir, veer', 'oïr', 'savoir, conoistre', 'penser', 'creindre, redoubter', 'dormir', 'vivre', 'morir', 'ocire, ocirre', '(se) batre', 'chacier', 'batre', 'colper, coper, couper', 'brisier, briser', 'voler', 'se pasmer', 'venir', 'gesir', 'retorner', 'chair', 'doner', 'tenir', 'laver', 'traire, treire', 'jeter, geter', 'lier', 'cosdre', 'conter', 'dire', 'chanter, canter', 'joer, juer', 'floter', 'geler', 'enfler', 'soleil, soloil', 'lune', 'estoile', 'iaue, eve', 'pluie, plovoir', 'riviere, flueve', 'lac', 'mer', 'sel', 'pierre, piere', 'sablon', 'terre, tere', 'ciel, cel', 'vent, vant', 'fum', 'fu, feu', 'cendre', 'ardre, ardoir, brusler', 'chemin, voie, rote', 'montaigne', 'roge', 'vert', 'jaune', 'blanc', 'noir', 'noit, nuit', 'jor, jur', 'an', 'chaut, chaloir', 'froit, froidure', 'plain, plein', 'novel', 'viel, ancien', 'bon', 'malvais, mauvais', 'sale', 'reont, ront', 'trenchant', 'sec', 'bon, juste', 'loing', 'destre', 'senestre', 'a, ad', 'en', 'avoec, avuec', 'et, e', 'se, si', 'kar, quar, qar', 'non, nom, num, nun']
+swadesh_pt_old = ['eu', 'tu', 'ele', 'nos', 'vos', 'eles', 'esto, aquesto', 'aquelo', 'aqui', 'ali', 'quen', 'que', 'u', 'quando', 'como', 'non', 'todo', 'muito', 'algũus', 'pouco', 'outro', 'un, ũu', 'dous', 'tres', 'quatro', 'cinco', 'grande, gran', 'longo', 'ancho', 'grosso', 'pesado', 'pequeno', 'curto', 'estreito', 'magro', 'moller, dona', 'ome', 'ome, pessõa', 'infante, meninno, creatura', 'moller', 'marido', 'madre, mãi', 'padre, pai', 'besta, bestia, bescha', 'peixe', 'ave', 'can', 'peollo', 'coobra', 'vermen', 'arvor', 'furesta, mata, monte', 'baston, pao', 'fruita, fruito', 'semente', 'folla', 'raiz', 'cortiça', 'fror, flor', 'erva', 'corda', 'pele', 'carne', 'sangui, sangue', 'osso', 'gordura', 'ovo', 'corno', 'rabo', 'pena', 'cabelo', 'cabeça', 'orella', 'ollo', 'nariz', 'boca', 'dente', 'lingua', 'unna, unlla', 'pee, pe', 'perna', 'gẽollo', 'mão', 'aa', 'ventre', 'tripas', 'colo', 'costas', 'peito, sẽo', 'coraçon', 'figado', 'bever', 'comer', 'morder', 'mamar', 'cospir', '', 'soprar', '', 'riir', 'veer', 'ouvir, oir, ascuitar', 'saber', 'pensar', 'cheirar', 'temer', 'dormir', 'viver', 'morrer', 'matar', 'pelejar', 'caçar', 'bater', 'cortar, partir', '', 'acuitelar', 'rascar', 'cavar', 'nadar', 'voar', 'andar', 'vĩir', 'jazer, deitar', 'sentar', 'levantar', '', 'caer', 'dar', 'tẽer', 'apertar', '', 'lavar', 'terger, enxugar', 'puxar', 'empuxar', 'lançar', 'atar', 'coser', 'contar', 'contar, dizer, falar', 'cantar', 'jogar', 'boiar', 'correr', 'gelar, *gear', 'inchar', 'sol', 'lũa', 'estrela', 'agua', 'chuvia', 'rio', 'lago', 'mar', 'sal', 'pedra', 'arẽa', 'poo', 'terra', 'nuve', 'nevoeiro', 'ceo', 'vento', 'neve', 'geo', 'fumo, fumaz', 'fogo', 'cĩisa', 'queimar, arder', 'caminno, via', 'montanna, monte', 'vermello', 'verde', 'amarelo', 'branco', 'negro', 'noite', 'dia', 'ano', 'caente', 'frio', 'chẽo', 'novo', 'vello, antigo', 'bon, bõo', 'mal, mao', 'podre', 'lixoso', 'estreito', 'redondo', 'amoado', 'romo', 'chão', 'mollado', 'seco', 'reito, dereito', 'preto', 'longe', 'dereita', 'sẽestra', 'a', 'en', 'con', 'e', 'se', 'porque', 'nome']
+
+swadesh_sa = ['अहम्' , 'त्वम्', 'स', 'वयम्, नस्', 'यूयम्, वस्', 'ते', 'इदम्', 'तत्', 'अत्र', 'तत्र', 'क', 'किम्', 'कुत्र', 'कदा', 'कथम्', 'न', 'सर्व', 'बहु', 'किञ्चिद्', 'अल्प', 'अन्य', 'एक', 'द्वि', 'त्रि', 'चतुर्', 'पञ्चन्', 'महत्', 'दीर्घ', 'उरु', 'घन', 'गुरु', 'अल्प', 'ह्रस्व', 'अंहु', 'तनु', 'स्त्री', 'पुरुष, नर', 'मनुष्य, मानव', 'बाल, शिशु', 'पत्नी, भार्या', 'पति', 'मातृ', 'पितृ', 'पशु', 'मत्स्य', 'वि, पक्षिन्', 'श्वन्', 'यूका', 'सर्प', 'कृमि', 'वृक्ष, तरु', 'वन', 'दण्ड', 'फल', 'बीज', 'पत्त्र', 'मूल', 'त्वच्', 'पुष्प', 'तृण', 'रज्जु', 'चर्मन्, त्वच्', 'मांस', 'रक्त, असृज्', 'अस्थि', 'पीवस्, मेदस्', 'अण्ड', 'शृङ्ग', 'पुच्छ', 'पर्ण', 'केश', 'शिरस्', 'कर्ण', 'अक्षि', 'नासा', 'वक्त्र, मुख', 'दन्त', 'जिह्वा', 'नख', 'पद', 'जङ्घ', 'जानु', 'हस्त, पाणि', 'पक्ष', 'उदर', 'अन्त्र, आन्त्र, गुद', 'गल, ग्रीवा', 'पृष्ठ', 'स्तन', 'हृदय', 'यकृत्', 'पिबति', 'खादति, अत्ति', 'दशति', 'धयति', 'ष्ठीवति', 'वमति', 'वाति', 'अनिति', 'स्मयते, हसति', 'पश्यति, √दृश्', 'शृणोति', 'जानाति', 'मन्यते, चिन्तयति', 'जिघ्रति', 'बिभेति, भयते', 'स्वपिति', 'जीवति', 'म्रियते', 'हन्ति', 'युध्यते', 'वेति', 'हन्ति, ताडयति', 'कृन्तति', 'भिनत्ति', 'विधति', 'लिखति', 'खनति', 'प्लवते', 'पतति', 'एति, गच्छति, चरति', 'आगच्छति', 'शेते', 'सीदति', 'तिष्ठति', 'वर्तते', 'पद्यते', 'ददाति', 'धरति', 'मृद्नाति', 'घर्षति', 'क्षालयति', 'मार्ष्टि', 'कर्षति', 'नुदति', 'क्षिपति', 'बध्नाति, बन्धति', 'सीव्यति', 'गणयति, कलते', 'वक्ति', 'गायति', 'दीव्यति', 'प्लवते', 'सरति, क्षरति', 'शीयते', 'श्वयति', 'सूर्य, रवि, सूर, भास्कर', 'मास, चन्द्रमस्, चन्द्र', 'नक्षत्र, स्तृ, तारा', 'जल, अप्, पानीय, वारि, उदन्, तोज', 'वर्ष', 'नदी', 'सरस्', 'समुद्र', 'लवण', 'अश्मन्', 'पांसु, शिकता', 'रेणु', 'क्षम्, पृथ्वी', 'नभस्, मेघ', 'मिह्', 'आकाश', 'वायु, वात', 'हिम, तुषार, तुहिन', 'हिम', 'धूम', 'अग्नि', 'आस', 'दहति', 'पथ, अध्वन्, मार्ग', 'गिरि, पर्वत', 'रक्त, रोहित', 'हरित्, हरित, पालाश, पलाश', 'पीत, पीतल', 'श्वेत', 'कृष्ण', 'रात्रि, नक्ति, क्षप्, रजनी', 'दिन, अहर्, दिवस', 'वर्ष, संवत्सर', 'तप्त', 'शीत', 'पूर्ण', 'नव, नूतन', 'जीर्ण, वृद्ध, पुरातन', 'वसु, भद्र', 'पाप, दुष्ट', 'पूति', 'मलिन, समल', 'ऋजु, साधु', 'वृत्त, वर्तुल', 'तीक्ष्ण', 'कुण्ठ', 'श्लक्ष्ण, स्निग्ध', 'आर्द्र, क्लिन्न', 'शुष्क', 'शुद्ध, सत्य', 'नेद, प्रति', 'दूर', 'दक्षिण', 'सव्य', 'काश्यां', 'अंतरे, मध्ये', 'सह', 'च', 'यदि', 'हि', 'नामन्']
+
+swadesh_txb = ['ñäś', 'tuwe', 'su', 'wes', 'yes', 'cey', 'se', 'su, samp', 'tane', 'tane, omp', 'kᵤse', 'kᵤse', 'ente', 'ente', 'mäkte', 'mā', 'poñc', 'māka', 'ṣemi', 'totka', 'allek', 'ṣe', 'wi', 'trey', 'śtwer', 'piś', 'orotstse', 'pärkare', 'aurtstse', '', 'kramartse', 'lykaśke, totka', '', '', '', 'klyiye, śana', 'eṅkwe', 'śaumo', 'śamaśke', 'śana', 'petso', 'mācer', 'pācer', 'luwo', 'laks', 'salamo luwo', 'ku', 'pärśeriñ', 'arṣāklo, auk', 'yel', 'stām', 'wartto, karāś', 'śakātai', 'oko', 'sārm, śäktālye', 'pilta', 'witsako', 'enmetre', 'pyāpyo', 'atiyai', '', 'ewe, yetse', 'misa', 'yasar', 'āy, āsta pl', 'ṣalype', '', 'krorīyai', 'pako', 'paruwa', 'matsi', 'āśce', 'klautso', 'ek', 'meli', 'koyṃ', 'keme', 'kantwo', '', 'paiyye', 'ckāckai', 'keni', 'ṣar', '', 'kātso', 'kātso', 'kor', 'sark', 'päścane', 'arañce', 'wästarye', 'yokäṃ', 'śuwaṃ', '', '', 'pitke', 'aṅkaiṃ', 'pinaṣṣnäṃ', 'anāṣṣäṃ, satāṣṣäṃ', 'ker-', 'lkāṣṣäṃ', 'klyauṣäṃ', 'aiśtär, kärsanaṃ', 'pälskanaṃ', 'warṣṣäṃ', 'prāskaṃ', 'kläntsaṃ', 'śaiṃ', 'sruketär', 'kauṣäṃ', 'witāre', 'śerītsi', 'karnäṣṣäṃ', 'karsnaṃ, latkanaṃ', 'kautanaṃ', 'tsopäṃ', '', 'rapanaṃ', 'nāṣṣäṃ', 'pluṣäṃ', 'yaṃ', 'känmaṣṣäṃ', 'lyaśäṃ', 'ṣamäṃ, āṣṣäṃ', 'kaltär', 'kluttaṅktär, sporttotär', 'kloyotär', 'aiṣṣäṃ', '', 'klupnātär, nuskaṣṣäṃ', 'lyuwetär, kantanatär', 'laikanatär', 'lyyāstär', 'slaṅktär', 'nätkanaṃ', 'karṣṣäṃ, saläṣṣäṃ', 'śanmästär, kärkaṣṣäṃ', '', 'ṣäṃṣtär', 'weṣṣäṃ', 'piyaṃ', 'kāñmäṃ', 'pluṣäṃ', 'reṣṣäṃ', '', 'staukkanatär', 'kauṃ', 'meñe', 'ścirye', 'war', 'swese', 'cake', 'lyam', 'samudtär', 'salyiye', 'kärweñe', 'warañc', 'tweye, taur', 'keṃ', 'tarkär', '', 'iprer', 'yente', 'śiñcatstse', '', '', 'puwar', 'taur, tweye', 'tsakṣtär,pälketär', 'ytārye', 'ṣale', 'ratre', 'motartstse', 'tute', 'ārkwi', 'erkent-', 'yṣiye', 'kauṃ', 'pikul', 'emalle', 'krośce', 'ite', 'ñuwe', 'ktsaitstse', 'kartse', 'yolo, pakwāre', 'āmpau', 'sal, kraketstse', '', '', 'mātre, akwatse', 'mālle', 'ṣmare', 'karītstse', 'asāre', '', 'akartte, ysape, etsuwai', 'lau, lauke', 'saiwai', 'śwālyai', '-ne', '-ne', 'śle', 'ṣp', 'krui, ente', 'kuce, mäkte', 'ñem']
class Swadesh():
@@ -39,6 +42,7 @@ def words(self):
return swadesh_eng_old
elif self.language == 'old_norse':
return swadesh_old_norse
+ elif self.language == 'hi':
+ return swadesh_hi
elif self.language == 'fr_old':
return swadesh_fr_old
-
diff --git a/cltk/stop/classical_hindi/stops.py b/cltk/stop/classical_hindi/stops.py
--- a/cltk/stop/classical_hindi/stops.py
+++ b/cltk/stop/classical_hindi/stops.py
@@ -4,7 +4,7 @@
"""
__author__ = 'Nishchith Shetty <inishchith[at]gmail[.]com>'
-STOP_LIST = ["हें", # yes
+STOPS_LIST = ["हें", # yes
"है", # is
"हैं", # there
"हि", # this
| diff --git a/cltk/tests/test_corpus.py b/cltk/tests/test_corpus.py
--- a/cltk/tests/test_corpus.py
+++ b/cltk/tests/test_corpus.py
@@ -761,6 +761,12 @@ def test_swadesh_sanskrit(self):
first_word = 'अहम्'
match = swadesh.words()[0]
self.assertEqual(first_word, match)
+
+ def test_swadesh_hindi(self):
+ swadesh = Swadesh('hi')
+ first_word = 'मैं'
+ match = swadesh.words()[0]
+ self.assertEqual(first_word, match)
if __name__ == '__main__':
unittest.main()
diff --git a/cltk/tests/test_stop.py b/cltk/tests/test_stop.py
--- a/cltk/tests/test_stop.py
+++ b/cltk/tests/test_stop.py
@@ -7,8 +7,10 @@
from cltk.stop.greek.stops import STOPS_LIST as GREEK_STOPS
from cltk.stop.latin.stops import STOPS_LIST as LATIN_STOPS
from cltk.stop.french.stops import STOPS_LIST as FRENCH_STOPS
+from cltk.stop.classical_hindi.stops import STOPS_LIST as HINDI_STOPS
from cltk.stop.arabic.stopword_filter import stopwords_filter as arabic_stop_filter
from cltk.stop.old_norse.stops import STOPS_LIST as OLD_NORSE_STOPS
+from cltk.tokenize.indian_tokenizer import indian_punctuation_tokenize_regex
from nltk.tokenize.punkt import PunktLanguageVars
import os
import unittest
@@ -77,7 +79,6 @@ def test_french_stopwords(self):
target_list = ['pensé', 'talant', 'd', '’', 'yonec', 'die', 'avant', 'dunt', 'nez', ',', 'pere', 'cum', 'primes',
'mere','.']
self.assertEqual(no_stops, target_list)
-
def test_string_stop_list(self):
"""Test production of stoplists from a given string"""
@@ -102,6 +103,16 @@ def test_old_norse_stopwords(self):
target_list = ['var', 'einn', 'morgin', ',', 'karlsefni', 'rjóðrit', 'flekk', 'nökkurn', ',', 'glitraði']
self.assertEqual(no_stops, target_list)
+ def test_classical_hindi_stops(self):
+ """
+ Test filtering classical hindi stopwords
+ Sentence extracted from (https://github.com/cltk/hindi_text_ltrc/blob/master/miscellaneous/gandhi/main.txt)
+ """
+ sentence = " वह काबुली फिर वहां आकर खडा हो गया है "
+ tokens = indian_punctuation_tokenize_regex(sentence)
+ no_stops = [word for word in tokens if word not in HINDI_STOPS]
+ target_list = ['काबुली', 'फिर', 'वहां', 'आकर', 'खडा', 'गया']
+ self.assertEqual(no_stops, target_list)
if __name__ == '__main__':
unittest.main()
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
<pre>
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
</pre>
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
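A possible interim workaround, sketched here under the assumption that NLTK is installed and that the path above points at a UTF-8 plain-text file; note the file is read first, since the tokenizer operates on strings rather than paths:

```python
from nltk.tokenize.punkt import PunktLanguageVars

# Read the text first; the tokenizer expects a string, not a file path.
with open('files/plain_text/1-23_1.1-42.txt', encoding='utf-8') as infile:
    text = infile.read()

for word in PunktLanguageVars().word_tokenize(text):
    if word.endswith('εαι'):
        print(word)
```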
| 2018-02-12T14:57:19 |
|
cltk/cltk | 702 | cltk__cltk-702 | [
"628"
] | b35fa605d79b9af84e25ab88cf4d403eb3099af8 | diff --git a/cltk/corpus/swadesh.py b/cltk/corpus/swadesh.py
--- a/cltk/corpus/swadesh.py
+++ b/cltk/corpus/swadesh.py
@@ -22,7 +22,6 @@
swadesh_syc=['ܐܢܐ','ܐܢܬ, ܐܢܬܝ', 'ܗܘ', 'ܚܢܢ,, ܐܢܚܢܢ', 'ܐܢܬܘܢ , ܐܢܬܝܢ ', 'ܗܢܘܢ , ܗܢܝܢ', 'ܗܢܐ, ܗܕܐ', 'ܗܘ, ܗܝ', 'ܗܪܟܐ', 'ܬܡܢ', 'ܡܢ', 'ܡܐ, ܡܢ, ܡܢܐ, ܡܘܢ', 'ܐܝܟܐ', 'ܐܡܬܝ', 'ܐܝܟܢ,, ܐܝܟܢܐ', 'ܠܐ', 'ܟܠ', 'ܣܓܝ ', 'ܟܡܐ ', 'ܒܨܝܪܐ', 'ܐܚܪܢܐ, ܐܚܪܬܐ', 'ܚܕ , ܚܕܐ', 'ܬܪܝܢ, ܬܪܬܝܢ', 'ܬܠܬܐ, ܬܠܬ', 'ܐܪܒܥܐ, ܐܪܒܥ', 'ܚܡܫܐ, ܚܡܫ', 'ܪܒܐ, ܟܒܝܪܐ ', 'ܐܪܝܟܐ', 'ܪܘܝܚܐ, ܦܬܝܐ', 'ܥܒܝܛܐ', 'ܢܛܝܠܐ, ܝܩܘܪܐ ', 'ܙܥܘܪܐ', 'ܟܪܝܐ', 'ܥܝܩܐ', 'ܪܩܝܩܐ, ܛܠܝܚܐ', 'ܐܢܬܬܐ', 'ܓܒܪܐ', 'ܐܢܫܐ', 'ܝܠܘܕܐ', 'ܐܢܬܬܐ', 'ܒܥܠܐ', 'ܐܡܐ', 'ܐܒܐ', 'ܚܝܘܬܐ', 'ܢܘܢܐ', 'ܛܝܪܐ, ܨܦܪܐ', 'ܟܠܒܐ', 'ܩܠܡܐ', 'ܚܘܝܐ', 'ܬܘܠܥܐ', 'ܐܝܠܢܐ', 'ܥܒܐ', 'ܩܝܣܐ', 'ܦܐܪܐ', 'ܙܪܥܐ', 'ܛܪܦܐ', 'ܫܪܫܐ ', 'ܩܠܦܬܐ', 'ܗܒܒܐ', 'ܓܠܐ', 'ܚܒܠܐ', 'ܓܠܕܐ ', 'ܒܣܪܐ', 'ܕܡܐ', 'ܓܪܡܐ', 'ܕܗܢܐ, ܫܘܡܢܐ', 'ܒܝܥܬܐ', 'ܩܪܢܐ', 'ܕܘܢܒܐ', 'ܐܒܪܐ', 'ܣܥܪܐ', 'ܪܝܫܐ', 'ܐܕܢܐ', 'ܥܝܢܐ', 'ܢܚܝܪܐ ', 'ܦܘܡܐ', 'ܫܢܐ, ܟܟܐ', 'ܠܫܢܐ', 'ܛܦܪܐ ', 'ܥܩܠܐ', 'ܪܓܠܐ', 'ܒܘܪܟܐ', 'ܐܝܕܐ', 'ܟܢܦܐ ', 'ܒܛܢܐ, ܟܪܣܐ ', 'ܡܥܝܐ, ܓܘܐ', 'ܨܘܪܐ, ܩܕܠܐ', 'ܚܨܐ, ܒܣܬܪܐ', 'ܚܕܝܐ', 'ܠܒܐ', 'ܟܒܕܐ', 'ܫܬܐ', 'ܐܟܠ', 'ܢܟܬ', 'ܡܨ ', 'ܪܩ', 'ܓܥܛ', 'ܢܦܚ', 'ܢܦܫ, ܢܫܡ', 'ܓܚܟ ', 'ܚܙܐ', 'ܫܡܥ', 'ܝܕܥ', 'ܚܫܒ', 'ܡܚ, ܣܩ', 'ܕܚܠ, ܟܘܪ', 'ܕܡܟ', 'ܚܝܐ ', 'ܡܝܬ', 'ܩܛܠ', 'ܟܬܫ', 'ܨܝܕ ', 'ܡܚܐ, ܢܩܫ', 'ܓܕܡ, ܩܛܥ', 'ܫܪܩ, ܦܕܥ, ܦܪܬ', 'ܕܓܫ', 'ܚܟ, ܣܪܛ', 'ܚܦܪ', 'ܣܚܐ', 'ܦܪܚ ', 'ܗܠܟ ', 'ܐܬܐ ', 'ܫܟܒ, ܡܟ', 'ܝܬܒ', 'ܬܪܨ', 'ܦܢܐ, ܥܛܦ ', 'ܢܦܠ', 'ܝܗܒ, ܢܬܠ', 'ܐܚܕ', 'ܩܡܛ, ܥܨܪ', 'ܫܦ, ܚܟ', 'ܚܠܠ, ܦܝܥ', 'ܟܦܪ', 'ܓܪܫ', 'ܙܥܦ ', 'ܪܡܐ', 'ܐܣܪ, ܩܛܪ', 'ܚܝܛ', 'ܡܢܐ', 'ܐܡܪ', 'ܙܡܪ', 'ܫܥܐ', 'ܛܦ', 'ܪܣܡ, ܫܚܠ', 'ܓܠܕ, ܩܪܫ', 'ܙܘܐ, ܥܒܐ', 'ܫܡܫܐ', 'ܣܗܪܐ', 'ܟܘܟܒܐ', 'ܡܝܐ ', 'ܡܛܪܐ', 'ܢܗܪܐ', 'ܝܡܬܐ', 'ܝܡܐ', 'ܡܠܚܐ ', 'ܟܐܦܐ, ܐܒܢܐ, ܫܘܥܐ', 'ܚܠܐ', 'ܐܒܩܐ, ܕܩܬܐ', 'ܐܪܥܐ', 'ܥܢܢܐ, ܥܝܡܐ, ܥܝܒܐ', 'ܥܪܦܠܐ ', 'ܫܡܝܐ', 'ܪܘܚܐ ', 'ܬܠܓܐ', 'ܓܠܝܕܐ', 'ܬܢܢܐ ', 'ܢܘܪܐ, ܐܫܬܐ', 'ܩܛܡܐ ', 'ܝܩܕ', 'ܐܘܪܚܐ', 'ܛܘܪܐ', 'ܣܘܡܩܐ', 'ܝܘܪܩܐ', 'ܫܥܘܬܐ', 'ܚܘܪܐ', 'ܐܘܟܡܐ ', 'ܠܠܝܐ ', 'ܝܘܡܐ ', 'ܫܢܬܐ', 'ܫܚܝܢܐ', 'ܩܪܝܪܐ', 'ܡܠܝܐ', 'ܚܕܬܐ', 'ܥܬܝܩܐ', 'ܛܒܐ', 'ܒܝܫܐ', 'ܒܩܝܩܐ ܚܪܝܒܐ', 'ܫܘܚܬܢܐ', 'ܬܪܝܨܐ ', 'ܚܘܕܪܢܝܐ', 'ܚܪܝܦܐ', 'ܩܗܝܐ', 'ܦܫܝܩܐ', 'ܪܛܝܒܐ, ܬܠܝܠܐ', 'ܝܒܝܫܐ', 'ܬܪܝܨܐ ', 'ܩܪܝܒܐ', 'ܪܚܝܩܐ', 'ܝܡܝܢܐ', 'ܣܡܠܐ', 'ܒ-, ܠܘܬ', 'ܥܡ', 'ܐܢ', '-ܡܛܠ ܕ, ܒܥܠܬ', 'ܫܡܐ']
-
class Swadesh():
def __init__(self, language):
self.language = language
diff --git a/cltk/tokenize/indian_tokenizer.py b/cltk/tokenize/indian_tokenizer.py
deleted file mode 100644
--- a/cltk/tokenize/indian_tokenizer.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Tokenizer for Indian languages/scripts."""
-
-import re
-import string
-
-__author__ = ['Anoop Kunchukuttan']
-__copyright = 'GPL'
-
-modified_punctuations = string.punctuation.replace("|","") # The replace , deletes the ' | ' from the punctuation string provided by the library
-
-indian_punctuation_pattern = re.compile('(['+modified_punctuations+'\u0964\u0965'+']|\|+)')
-
-
-def indian_punctuation_tokenize_regex(input_str):
- """A trivial tokenizer which just tokenizes on the punctuation boundaries.
- This also includes punctuation, namely the the purna virama ("|") and
- deergha virama ("॥"), for Indian language scripts.
-
- >>> indian_str = "प्रेमचन्द का जन्म ३१ जुलाई सन् १८८० को बनारस शहर।"
- >>> indian_punctuation_tokenize_regex(indian_str)
- ['प्रेमचन्द', 'का', 'जन्म', '३१', 'जुलाई', 'सन्', '१८८०', 'को', 'बनारस', 'शहर', '।']
-
- :param input_str: A string to be
- :type input_str: string
- :return List of word tokens.
- :rtype: list
- """
- tok_str = indian_punctuation_pattern.sub(r' \1 ',input_str.replace('\t',' '))
- return re.sub(r'[ ]+',u' ',tok_str).strip(' ').split(' ')
-
-
-if __name__ == '__main__':
- example = indian_punctuation_tokenize_regex("हिन्दी भारत की सबसे अधिक बोली और समझी जाने वाली भाषा है।")
- print(example)
diff --git a/cltk/tokenize/sentence.py b/cltk/tokenize/sentence.py
--- a/cltk/tokenize/sentence.py
+++ b/cltk/tokenize/sentence.py
@@ -1,6 +1,6 @@
"""Tokenize sentences."""
-__author__ = ['Kyle P. Johnson <[email protected]>']
+__author__ = ['Kyle P. Johnson <[email protected]>','Anoop Kunchukuttan']
__license__ = 'MIT License. See LICENSE.'
@@ -8,6 +8,8 @@
from nltk.tokenize.punkt import PunktLanguageVars
from nltk.tokenize.punkt import PunktSentenceTokenizer
import os
+import re
+import string
PUNCTUATION = {'greek':
@@ -19,6 +21,7 @@
'internal': (',', ';'),
'file': 'latin.pickle', }}
+INDIAN_LANGUAGES = ['bengali','hindi','marathi','sanskrit','telugu']
class TokenizeSentence(): # pylint: disable=R0903
"""Tokenize sentences for the language given as argument, e.g.,
@@ -31,8 +34,10 @@ def __init__(self: object, language: str):
:param language : Language for sentence tokenization.
"""
self.language = language.lower()
- self.internal_punctuation, self.external_punctuation, self.tokenizer_path = \
- self._setup_language_variables(self.language)
+
+ if self.language not in INDIAN_LANGUAGES :
+ self.internal_punctuation, self.external_punctuation, self.tokenizer_path = \
+ self._setup_language_variables(self.language)
def _setup_language_variables(self, lang: str):
"""Check for language availability and presence of tokenizer file,
@@ -88,9 +93,26 @@ def tokenize_sentences(self: object, untokenized_string: str):
for sentence in tokenizer.sentences_from_text(untokenized_string, realign_boundaries=True): # pylint: disable=C0301
tokenized_sentences.append(sentence)
return tokenized_sentences
-
+
+ def indian_punctuation_tokenize_regex(self: object, untokenized_string: str):
+ """A trivial tokenizer which just tokenizes on the punctuation boundaries.
+ This also includes punctuation, namely the the purna virama ("|") and
+ deergha virama ("॥"), for Indian language scripts.
+
+ :type untokenized_string: str
+ :param untokenized_string: A string containing one of more sentences.
+ :rtype : list of strings
+ """
+ modified_punctuations = string.punctuation.replace("|","") # The replace , deletes the ' | ' from the punctuation string provided by the library
+ indian_punctuation_pattern = re.compile('(['+modified_punctuations+'\u0964\u0965'+']|\|+)')
+ tok_str = indian_punctuation_pattern.sub(r' \1 ',untokenized_string.replace('\t',' '))
+ return re.sub(r'[ ]+',u' ',tok_str).strip(' ').split(' ')
+
def tokenize(self: object, untokenized_string: str):
# NLTK's PlaintextCorpusReader needs a function called tokenize
# in functions used as a parameter for sentence tokenization.
# So this is an alias for tokenize_sentences().
- return self.tokenize_sentences(untokenized_string)
+ if self.language in INDIAN_LANGUAGES:
+ return self.indian_punctuation_tokenize_regex(untokenized_string)
+ else:
+ return self.tokenize_sentences(untokenized_string)
| diff --git a/cltk/tests/test_stop.py b/cltk/tests/test_stop.py
--- a/cltk/tests/test_stop.py
+++ b/cltk/tests/test_stop.py
@@ -11,7 +11,7 @@
from cltk.stop.classical_hindi.stops import STOPS_LIST as HINDI_STOPS
from cltk.stop.arabic.stopword_filter import stopwords_filter as arabic_stop_filter
from cltk.stop.old_norse.stops import STOPS_LIST as OLD_NORSE_STOPS
-from cltk.tokenize.indian_tokenizer import indian_punctuation_tokenize_regex
+from cltk.tokenize.sentence import TokenizeSentence
from nltk.tokenize.punkt import PunktLanguageVars
from cltk.tokenize.word import WordTokenizer
import os
@@ -122,7 +122,8 @@ def test_classical_hindi_stops(self):
Sentence extracted from (https://github.com/cltk/hindi_text_ltrc/blob/master/miscellaneous/gandhi/main.txt)
"""
sentence = " वह काबुली फिर वहां आकर खडा हो गया है "
- tokens = indian_punctuation_tokenize_regex(sentence)
+ tokenizer = TokenizeSentence('hindi')
+ tokens = tokenizer.tokenize(sentence)
no_stops = [word for word in tokens if word not in HINDI_STOPS]
target_list = ['काबुली', 'फिर', 'वहां', 'आकर', 'खडा', 'गया']
self.assertEqual(no_stops, target_list)
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
<pre>
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
</pre>
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
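For context on this record's patch, which routes the listed Indian languages through `TokenizeSentence`: a usage sketch mirroring the accompanying test, with the sample sentence taken from the removed `indian_tokenizer.py` doctest:

```python
from cltk.tokenize.sentence import TokenizeSentence

tokenizer = TokenizeSentence('hindi')
tokens = tokenizer.tokenize("प्रेमचन्द का जन्म ३१ जुलाई सन् १८८० को बनारस शहर।")
# Per the removed module's doctest, the split ends with ..., 'बनारस', 'शहर', '।'
print(tokens)
```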
| 2018-02-20T15:53:50 |
|
cltk/cltk | 749 | cltk__cltk-749 | [
"628"
] | 0dd1b50edfc986c804926825178717ecc5101dc5 | diff --git a/cltk/corpus/swadesh.py b/cltk/corpus/swadesh.py
--- a/cltk/corpus/swadesh.py
+++ b/cltk/corpus/swadesh.py
@@ -76,6 +76,7 @@
'अच्छा', 'बुरा', 'सड़ा', 'गन्दा', 'सीधा', 'गोल', 'तीखा', 'कुंद', 'चिकना', 'गीला', 'सूखा', 'सही', 'नज़दीक',
'दूर', 'दायाँ', 'बायाँ', 'पे', 'में', 'के साथ', 'और', 'अगर', 'क्योंकि', 'नाम']
+
swadesh_la = ['ego', 'tū', 'is, ea, id', 'nōs', 'vōs', 'eī, iī, eae, ea', 'hic, haec, hoc', 'ille, illa, illud', 'hīc',
'illic, ibi', 'quis, quae', 'quid', 'ubi', 'cum', 'quōmodō', 'nōn, nē', 'omnēs, omnia',
'multī, multae, multa', 'aliquī, aliqua, aliquod', 'paucī, paucae, pauca', 'alter, alius', 'ūnus', 'duō',
@@ -100,6 +101,7 @@
'humidus, aqueus', 'siccus', 'rectus', 'propinquus, proximus', 'longus', 'dexter', 'laevus, sinister',
'ad, in', 'in', 'cum', 'et, -que', 'si', 'quod', 'nōmen']
+
swadesh_old_norse = ["ek", "þú", "hann", "vér", "þér", "þeir", "sjá, þessi", "sá", "hér", "þar", "hvar", "hvat", "hvar",
"hvenær", "hvé", "eigi", "allr", "margr", "nǫkkurr", "fár", "annarr", "einn", "tveir", "þrír",
"fjórir", "fimm", "stórr", "langr", "breiðr", "þykkr", "þungr", "lítill", "stuttr", "mjór",
diff --git a/cltk/corpus/telugu/alphabet.py b/cltk/corpus/telugu/alphabet.py
new file mode 100644
--- /dev/null
+++ b/cltk/corpus/telugu/alphabet.py
@@ -0,0 +1,16 @@
+"""Telugu alphabet"""
+__author__ = 'Nishchith Shetty <inishchith[at]gmail[dot]com>'
+
+VOWELS = ['అ ','ఆ','ఇ','ఈ ','ఉ ','ఊ ','ఋ ','ౠ ','ఌ ',
+ 'ౡ','ఎ','ఏ','ఐ','ఒ','ఓ','ఔ ','అం','అః']
+
+CONSONANTS = ['క', 'ఖ','గ','ఘ','ఙ'
+ 'చ','ఛ','జ','ఝ','ఞ',
+ 'ట','ఠ','డ ','ఢ','ణ',
+ 'త','థ','ద','ధ','న',
+ 'ప','ఫ','బ','భ','మ',
+ 'య','ర','ల','వ',
+ 'శ','ష','స ','హ',
+ 'ళ','క్ష ','ఱ']
+
+NUMERLALS = ['౦','౧','౨','౩','౪ ','౫','౬','౭','౮','౯']
| diff --git a/cltk/tests/test_nlp/test_tokenize.py b/cltk/tests/test_nlp/test_tokenize.py
--- a/cltk/tests/test_nlp/test_tokenize.py
+++ b/cltk/tests/test_nlp/test_tokenize.py
@@ -1,7 +1,6 @@
# -*-coding:utf-8-*-
"""Test cltk.tokenize.
-TODO: Add tests for the Indian lang tokenizers: from cltk.tokenize.indian_tokenizer import indian_punctuation_tokenize_regex
"""
from cltk.corpus.utils.importer import CorpusImporter
@@ -249,6 +248,45 @@ def test_middle_high_german_tokenizer(self):
tokenized_lines = tokenizer.tokenize(text)
self.assertTrue(tokenized_lines == target)
+ def test_sentence_tokenizer_bengali(self):
+ """Test tokenizing bengali sentences."""
+ text = "দুর্ব্বাসার শাপে রাজা শকুন্তলাকে একেবারে ভুলে বেশ সুখে আছেন।"
+ target = ['দুর্ব্বাসার', 'শাপে', 'রাজা', 'শকুন্তলাকে', 'একেবারে', 'ভুলে', 'বেশ', 'সুখে', 'আছেন', '।']
+ tokenizer = TokenizeSentence('bengali')
+ tokenized_sentences = tokenizer.tokenize(text)
+ self.assertEqual(tokenized_sentences, target)
+
+ def test_sentence_tokenizer_classical_hindi(self):
+ """Test tokenizing classical_hindi sentences."""
+ text = "जलर् चिकित्सा से उन्हें कोई लाभ नहीं हुआ।"
+ target = ['जलर्', 'चिकित्सा', 'से', 'उन्हें', 'कोई', 'लाभ', 'नहीं', 'हुआ', '।']
+ tokenizer = TokenizeSentence('hindi')
+ tokenized_sentences = tokenizer.tokenize(text)
+ self.assertEqual(tokenized_sentences, target)
+
+ def test_sentence_tokenizer_marathi(self):
+ """Test tokenizing marathi sentences."""
+ text = "अर्जुन उवाच । एवं सतत युक्ता ये भक्तास्त्वां पर्युपासते । ये चाप्यक्षरमव्यक्तं तेषां के योगवित्तमाः ॥"
+ target = ['अर्जुन', 'उवाच', '।', 'एवं', 'सतत', 'युक्ता', 'ये', 'भक्तास्त्वां', 'पर्युपासते', '।', 'ये', 'चाप्यक्षरमव्यक्तं', 'तेषां', 'के', 'योगवित्तमाः', '॥']
+ tokenizer = TokenizeSentence('marathi')
+ tokenized_sentences = tokenizer.tokenize(text)
+ self.assertEqual(tokenized_sentences, target)
+
+ def test_sentence_tokenizer_sanskrit(self):
+ """Test tokenizing sanskrit sentences."""
+ text = "श्री भगवानुवाच पश्य मे पार्थ रूपाणि शतशोऽथ सहस्रशः। नानाविधानि दिव्यानि नानावर्णाकृतीनि च।।"
+ target = ['श्री', 'भगवानुवाच', 'पश्य', 'मे', 'पार्थ', 'रूपाणि', 'शतशोऽथ', 'सहस्रशः', '।', 'नानाविधानि', 'दिव्यानि', 'नानावर्णाकृतीनि', 'च', '।', '।']
+ tokenizer = TokenizeSentence('sanskrit')
+ tokenized_sentences = tokenizer.tokenize(text)
+ self.assertEqual(tokenized_sentences, target)
+
+ def test_sentence_tokenizer_telugu(self):
+ """Test tokenizing telugu sentences."""
+ text = "తా. ఎక్కడెక్కడ బుట్టిన నదులును రత్నాకరుడను నాశతో సముద్రుని చేరువిధముగా నెన్నియిక్కట్టులకైన నోర్చి ప్రజలు దమంతట దామె ప్రియముం జూపుచు ధనికుని యింటికేతెంచుచుందురు."
+ target = ['తా', '.', 'ఎక్కడెక్కడ', 'బుట్టిన', 'నదులును', 'రత్నాకరుడను', 'నాశతో', 'సముద్రుని', 'చేరువిధముగా', 'నెన్నియిక్కట్టులకైన', 'నోర్చి', 'ప్రజలు', 'దమంతట', 'దామె', 'ప్రియముం', 'జూపుచు', 'ధనికుని', 'యింటికేతెంచుచుందురు', '.']
+ tokenizer = TokenizeSentence('telugu')
+ tokenized_sentences = tokenizer.tokenize(text)
+ self.assertEqual(tokenized_sentences, target)
def test_akkadian_word_tokenizer(self):
"""
Tests word_tokenizer.
| Greek not among word tokenizers
Hi! I attempted to use the Greek word tokenizer for a really quick search of a plain text file, as follows:
<pre>
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('greek')
for word in word_tokenizer.tokenize('files/plain_text/1-23_1.1-42.txt'):
    if word.endswith('εαι'):
        print(word)
</pre>
When I tried to call the script, I got the following error:
`AssertionError: Specific tokenizer not available for 'greek'. Only available for: '['arabic', 'latin', 'french', 'old_norse']'.`
Thanks!
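For context on this record's patch, which also adds basic Telugu alphabet constants: a small usage sketch, assuming the `cltk.corpus.telugu` package is importable once the patch is applied (the constant names, including the `NUMERLALS` spelling, are exactly as defined in the new module):

```python
from cltk.corpus.telugu.alphabet import VOWELS, CONSONANTS, NUMERLALS

print(VOWELS[0])        # first Telugu vowel
print(len(CONSONANTS))  # size of the consonant inventory
print(NUMERLALS)        # Telugu digit characters 0-9
```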
| 2018-03-16T17:36:59 |
|
cltk/cltk | 869 | cltk__cltk-869 | [
"790"
] | 093e43386444ce1a32b9f9ecb274e913eaa20908 | diff --git a/cltk/prosody/greek/scanner.py b/cltk/prosody/greek/scanner.py
--- a/cltk/prosody/greek/scanner.py
+++ b/cltk/prosody/greek/scanner.py
@@ -138,7 +138,7 @@ def _long_by_nature(self, syllable):
print
if char in self.long_vowels:
return True
- elif char not in self.sing_cons:
+ elif char not in self.sing_cons and char not in self.doub_cons:
vowel_group += char
if ''.join(vowel_group) in self.diphthongs:
| Greek Scansion class: small bug in prosody/greek/scanner.py
In line 141 of `scanner.py` the class method `_long_by_nature` checks only if a character is a single consonant, but not if it is a double consonant:
```python
elif char not in self.sing_cons:
    vowel_group += char
```
`vowel_group` is then used to check if a group is a diphthong. The result is that a syllable with a double consonant and a diphthong is not recognized as "long by nature". E.g.:
```python
>>> from cltk.prosody.greek.scanner import Scansion
>>> scanner = Scansion()
>>> scanner.scan_text("Ζεὺς.")
['˘']
```
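A quick sanity check of what the one-line fix above is meant to achieve (a sketch based on this report; the `['¯']` output is the intended result after the patch, not a captured run):
```python
# With ζ treated as a double consonant, the diphthong ευ is still collected
# into the vowel group, so the syllable is recognized as long by nature.
from cltk.prosody.greek.scanner import Scansion

scanner = Scansion()
scanner.scan_text("Ζεὺς.")  # expected: ['¯'] (long) instead of ['˘'] (short)
```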
| Thanks for finding this bug! I'll try to push out a fix this evening.
Hello, if it has not been fixed yet, can I work on this?
@kylepjohnson @TylerKirby
Hey @siddharthkv7, that would be great. This seems like a pretty small fix. We just need to be checking for double consonants (ξ, ψ, and ζ). | 2019-02-06T20:57:32 |
|
cltk/cltk | 880 | cltk__cltk-880 | [
"879",
"879"
] | 4b6b6beac12c32dfc0f7b190a5542295447ff57a | diff --git a/cltk/stem/sanskrit/indian_syllabifier.py b/cltk/stem/sanskrit/indian_syllabifier.py
--- a/cltk/stem/sanskrit/indian_syllabifier.py
+++ b/cltk/stem/sanskrit/indian_syllabifier.py
@@ -7,12 +7,12 @@
"""
import os
+import csv
try:
import numpy as np
- import pandas as pd
except ImportError:
- print('"pandas" and "numpy" libraries not installed.')
+ print('"numpy" is not installed.')
raise
__author__ = ['Anoop Kunchukuttan']
@@ -93,12 +93,26 @@ def get_lang_data(self):
csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')
all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')
- all_phonetic_data = pd.read_csv(all_phonetic_csv, encoding='utf-8')
tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')
- tamil_phonetic_data = pd.read_csv(tamil_csv, encoding='utf-8')
- all_phonetic_vectors = all_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values
- tamil_phonetic_vectors = tamil_phonetic_data.ix[:, PHONETIC_VECTOR_START_OFFSET:].values
+ # Make helper function for this
+ with open(all_phonetic_csv,'r') as f:
+ reader = csv.reader(f, delimiter = ',', quotechar = '"')
+ next(reader, None) # Skip headers
+ all_phonetic_data = [row for row in reader]
+
+ with open(tamil_csv,'r') as f:
+ reader = csv.reader(f, delimiter = ',', quotechar = '"')
+ next(reader, None) # Skip headers
+ # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader]
+ tamil_phonetic_data = [row for row in reader]
+
+ # Handle better?
+ all_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in all_phonetic_data]
+ tamil_phonetic_data = [[int(cell) if cell=='0' or cell=='1' else cell for cell in row] for row in tamil_phonetic_data]
+
+ all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data])
+ tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data])
phonetic_vector_length = all_phonetic_vectors.shape[1]
@@ -106,7 +120,7 @@ def get_lang_data(self):
@staticmethod
def in_coordinated_range_offset(c_offset):
- """Applicable to Brahmi derived Indic scripts. Used to determine
+ """Applicable to Brahmi derived Indic scripts. Used to determine
whether offset is of a alphabetic character or not.
"""
return COORDINATED_RANGE_START_INCLUSIVE <= c_offset <= COORDINATED_RANGE_END_INCLUSIVE
@@ -140,7 +154,8 @@ def get_phonetic_feature_vector(self, c, lang):
phonetic_data, phonetic_vectors = self.get_phonetic_info(lang)
- if phonetic_data.ix[offset, 'Valid Vector Representation'] == 0:
+ # 'Valid Vector Representation' is the [5] column
+ if phonetic_data[offset][5] == 0:
return self.invalid_vector()
return phonetic_vectors[offset]
| diff --git a/cltk/tests/test_nlp/test_corpus.py b/cltk/tests/test_nlp/test_corpus.py
--- a/cltk/tests/test_nlp/test_corpus.py
+++ b/cltk/tests/test_nlp/test_corpus.py
@@ -122,7 +122,7 @@ def test_tlgu_convert(self):
"""Test TLGU convert. This reads the file
``tlgu_test_text_beta_code.txt``, which mimics a TLG file, and
converts it.
- Note: assertEquals fails on some accented characters ('ή', 'ί').
+ Note: assertEqual fails on some accented characters ('ή', 'ί').
"""
in_test = os.path.abspath('cltk/tests/test_nlp/tlgu_test_text_beta_code.txt')
out_test = os.path.expanduser('~/cltk_data/tlgu_test_text_unicode.txt')
diff --git a/cltk/tests/test_nlp/test_stem.py b/cltk/tests/test_nlp/test_stem.py
--- a/cltk/tests/test_nlp/test_stem.py
+++ b/cltk/tests/test_nlp/test_stem.py
@@ -242,7 +242,7 @@ def test_akkadian_bound_form(self):
word = "awīlum"
bound_form = bound_former.get_bound_form(word, 'm')
target = "awīl"
- self.assertEquals(bound_form, target)
+ self.assertEqual(bound_form, target)
def test_akkadian_cv_pattern(self):
"""Test Akkadian CV pattern method"""
@@ -250,7 +250,7 @@ def test_akkadian_cv_pattern(self):
word = "iparras"
cv_pattern = cv_patterner.get_cv_pattern(word, pprint=True)
target = "V₁C₁V₂C₂C₂V₂C₃"
- self.assertEquals(cv_pattern, target)
+ self.assertEqual(cv_pattern, target)
def test_akkadian_declension(self):
"""Test Akkadian noun declension"""
@@ -264,7 +264,7 @@ def test_akkadian_declension(self):
('iltān', {'case': 'nominative', 'number': 'dual'}),
('ilātim', {'case': 'oblique', 'number': 'plural'}),
('ilātum', {'case': 'nominative', 'number': 'plural'})]
- self.assertEquals(sorted(declension), sorted(target))
+ self.assertEqual(sorted(declension), sorted(target))
def test_akkadian_stemmer(self):
"""Test Akkadian stemmer"""
@@ -272,7 +272,7 @@ def test_akkadian_stemmer(self):
word = "šarrū"
stem = stemmer.get_stem(word, 'm')
target = "šarr"
- self.assertEquals(stem, target)
+ self.assertEqual(stem, target)
def test_akkadian_syllabifier(self):
"""Test Akkadian syllabifier"""
@@ -549,7 +549,7 @@ def french_stemmer_test(self):
target = "j depart a it quant par la vil v err tut a cheval un pucel en tut le siecl n' o si bel un blanc palefre" \
" chevalcho "
self.assertEqual(stemmed_text, target)
-
+
def test_middle_english_stemmer(self):
sentence = ['the', 'speke', 'the', 'henmest', 'kyng', 'in', 'the', 'hillis', 'he', 'beholdis','he', 'lokis', 'vnder',
'his', 'hondis', 'and', 'his', 'hed', 'heldis']
| Remove pandas as a dependency
pandas is only used in the IndianSyllabifier and only for reading a csv file before its data is converted to a numpy array. pandas is also the largest external dependency (e.g. slowest to install in travis-ci). The csv reading and conversion to numpy can be done with the standard libraries, spec. ```csv```
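A minimal sketch of the proposed replacement, using only the standard-library `csv` module plus `numpy` (the header row and the offset of the feature columns are assumptions for illustration; the real code uses its `PHONETIC_VECTOR_START_OFFSET` constant):
```python
import csv

import numpy as np

def load_phonetic_data(csv_path, vector_start_offset):
    """Read a phonetic-data CSV without pandas and build the vector array."""
    with open(csv_path, newline='', encoding='utf-8') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        rows = [row for row in reader]
    # Cast the 0/1 feature cells to int and stack them into a numpy array.
    vectors = np.array(
        [[int(cell) for cell in row[vector_start_offset:]] for row in rows]
    )
    return rows, vectors
```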
| 2019-03-02T00:19:14 |
|
cltk/cltk | 906 | cltk__cltk-906 | [
"903"
] | e30b8ea44c26681e1e86c74636405b62a9d749aa | diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py
--- a/cltk/tag/ner.py
+++ b/cltk/tag/ner.py
@@ -74,10 +74,6 @@ def _check_latest_data(lang):
def tag_ner(lang, input_text, output_type=list):
"""Run NER for chosen language.
- Choosing output_type=list, returns a list of tuples:
-
- >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
- [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]
"""
_check_latest_data(lang)
@@ -134,4 +130,3 @@ def tag_ner(lang, input_text, output_type=list):
return string
return ner_tuple_list
-
| diff --git a/cltk/tests/test_corpus/test_corpus.py b/cltk/tests/test_corpus/test_corpus.py
--- a/cltk/tests/test_corpus/test_corpus.py
+++ b/cltk/tests/test_corpus/test_corpus.py
@@ -710,6 +710,7 @@ class TestFilteredCorpus(unittest.TestCase):
def setUpClass(cls):
try:
corpus_importer = CorpusImporter('latin')
+ corpus_importer.import_corpus('latin_models_cltk')
corpus_importer.import_corpus('latin_text_latin_library')
except:
raise Exception('Failure to download test corpus')
| Fix broken NER doctest
Open to anyone.
This simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78
is all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ).
The test expects 4 padded spaces on the left.
I have no idea why this would break all of a sudden.
| Investigating... The doctest passes locally with ```python -m doctest -v ner.py```.
This doctest can be considered redundant as there is a unittest that tests the same functionality—going to remove.
Separate (and for a different issue)—looks like this module could be updated/refactored/revisited in general. Will add to my to-do list... | 2019-05-02T18:39:14 |
cltk/cltk | 938 | cltk__cltk-938 | [
"937"
] | c2b604107894298ab35b59c833feb9b0d80c011a | diff --git a/cltk/corpus/greek/corpora.py b/cltk/corpus/greek/corpora.py
--- a/cltk/corpus/greek/corpora.py
+++ b/cltk/corpus/greek/corpora.py
@@ -42,6 +42,12 @@
'name': 'greek_treebank_perseus',
'location': 'remote',
'type': 'treebank'},
+ {'encoding': 'utf-8',
+ 'markup': 'xml',
+ 'origin': 'https://github.com/vgorman1/Greek-Dependency-Trees.git',
+ 'name': 'greek_treebank_gorman',
+ 'location': 'remote',
+ 'type': 'treebank'},
{'encoding': 'xml',
'markup': 'plaintext',
'origin': 'https://github.com/cltk/greek_lexica_perseus.git',
| Add "Gorman Trees" corpus for Greek
https://github.com/perseids-publications/gorman-trees = https://perseids-publications.github.io/gorman-trees/
~500,000 tokens of parsed Ancient Greek.
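Once an entry like the one in the patch above is merged, importing the treebank should look like any other registered Greek corpus (a hedged sketch; the corpus name comes from the patch and the importer API mirrors the one used in this PR's tests):
```python
from cltk.corpus.utils.importer import CorpusImporter

corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_treebank_gorman')
```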
| Or provide instructions for loading it as a remote corpus; though that may take rewriting corpus_importer (would have to check—@kylepjohnson?)
Related—worth separate issues: retrain the backoff lemmatizer, the pos taggers, etc. Will create soon.
> Or provide instructions for loading it as a remote corpus; though that may take rewriting corpus_importer (would have to check—@kylepjohnson?)
If it is a Git repo, we only need [to add it to our official Greek repo](https://github.com/cltk/cltk/wiki/How-to-add-a-corpus-to-the-CLTK#adding-a-git-backed-corpus-to-github) or an individual can it [locally through a custom config file](http://docs.cltk.org/en/latest/importing_corpora.html#user-defined-distributed-corpora).
Regarding the structure of this repo, it appears to be meant to be an entire website. So there is a bunch of stuff our users wouldn't be interested in, however I do see there is a config.json file. Example, point to a work by Aeschines: https://github.com/perseids-publications/gorman-trees/blob/df219711d26f12089f47b4e0fd4fd1a468da844b/src/config.json#L14
```
"path": "aeschines-1-1-50-bu1",
"xml": "aeschines-1-1-50-bu1.xml",
"chunks": {
```
So someone might want to make a corpus reader, using this config, that pulls these directly.
@ryanfb if you want to give a shot at adding this corpus, and thus get to know the project a little better, I would welcome a PR. Otherwise I can do it quickly myself.
Also, a related topic: Should we continue to fork other repos ([example](https://github.com/cltk/treebank-releases)) or simply point to the original upstream?
The benefit of forking is that we protect against upstream breakage; the downside is that someone must manually update each repo with a PR, which in practice means that the data in our forked repos is usually out of date.
In the past, I favored forking, though now I lean towards pointing to the original repo and hoping to work with those maintainers when they break compatibility.
Vanessa Gorman also has a repo with just the XML files [here](https://github.com/vgorman1/Greek-Dependency-Trees), and has stated (on the GLtreebank Google Group) that she intends to keep both up to date.
I'd be happy to make a quick PR for pulling that repo in as a Greek treebank corpus. The past forking was why I was unsure. There are, as you mention, upsides and downsides to each approach.
OK please make the PR using the original XML repo (thus not our own cltk fork).
Meta: In general, I am inclined to reduce the number of corpora we make available, and instead focus energies on better integration. | 2019-09-18T19:13:13 |
|
cltk/cltk | 944 | cltk__cltk-944 | [
"943"
] | a58c2e45e32394337a5913d30923a3cf7d0df0ab | diff --git a/cltk/tokenize/latin/params.py b/cltk/tokenize/latin/params.py
--- a/cltk/tokenize/latin/params.py
+++ b/cltk/tokenize/latin/params.py
@@ -154,3 +154,6 @@
class LatinLanguageVars(PunktLanguageVars):
_re_non_word_chars = PunktLanguageVars._re_non_word_chars.replace("'",'')
+
+PUNCTUATION = ('.', '?', '!')
+STRICT_PUNCTUATION = PUNCTUATION+('-', ':', ';')
diff --git a/cltk/tokenize/latin/sentence.py b/cltk/tokenize/latin/sentence.py
--- a/cltk/tokenize/latin/sentence.py
+++ b/cltk/tokenize/latin/sentence.py
@@ -5,28 +5,32 @@
__license__ = 'MIT License.'
import os.path
-
+import nltk
+from nltk.tokenize.punkt import PunktLanguageVars
from cltk.tokenize.sentence import BaseSentenceTokenizer, BasePunktSentenceTokenizer
-from cltk.tokenize.latin.params import LatinLanguageVars
+from cltk.tokenize.latin.params import LatinLanguageVars, PUNCTUATION, STRICT_PUNCTUATION
from cltk.utils.file_operations import open_pickle
-def SentenceTokenizer(tokenizer:str = 'punkt'):
+def SentenceTokenizer(tokenizer:str = 'punkt', strict:bool = False):
if tokenizer=='punkt':
- return LatinPunktSentenceTokenizer()
+ return LatinPunktSentenceTokenizer(strict=strict)
class LatinPunktSentenceTokenizer(BasePunktSentenceTokenizer):
""" PunktSentenceTokenizer trained on Latin
"""
models_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/tokenizers/sentence')
- missing_models_message = "BackoffLatinLemmatizer requires the ```latin_models_cltk``` to be in cltk_data. Please load this corpus."
+ missing_models_message = "LatinPunktSentenceTokenizer requires the ```latin_models_cltk``` to be in cltk_data. Please load this corpus."
- def __init__(self: object, language:str = 'latin'):
+ def __init__(self: object, language:str = 'latin', strict:bool = False):
"""
:param language : language for sentence tokenization
:type language: str
+        :param strict : allow for stricter punctuation for sentence tokenization
+ :type strict: bool
"""
self.lang_vars = LatinLanguageVars()
+ self.strict = strict
super().__init__(language='latin', lang_vars=self.lang_vars)
self.models_path = LatinPunktSentenceTokenizer.models_path
@@ -34,3 +38,8 @@ def __init__(self: object, language:str = 'latin'):
self.model = open_pickle(os.path.join(self.models_path, 'latin_punkt.pickle'))
except FileNotFoundError as err:
raise type(err)(LatinPunktSentenceTokenizer.missing_models_message)
+
+ if self.strict:
+ PunktLanguageVars.sent_end_chars=STRICT_PUNCTUATION
+ else:
+ PunktLanguageVars.sent_end_chars=PUNCTUATION
diff --git a/cltk/tokenize/sentence.py b/cltk/tokenize/sentence.py
--- a/cltk/tokenize/sentence.py
+++ b/cltk/tokenize/sentence.py
@@ -164,11 +164,3 @@ def tokenize(self, untokenized_string: str, model=None):
:param untokenized_string: A string containing one of more sentences.
"""
return self.tokenize_sentences(untokenized_string)
-
-if __name__ == "__main__":
- text = """श्री भगवानुवाच भूय एव महाबाहो श्रृणु मे परमं वचः। यत्तेऽहं प्रीयमाणाय वक्ष्यामि हितकाम्यया।।
-न मे विदुः सुरगणाः प्रभवं न महर्षयः। अहमादिर्हि देवानां महर्षीणां च सर्वशः।।"""
- t = TokenizeSentence('sanskrit')
- sents = t.tokenize(text)
- for i, sent in enumerate(sents, 1):
- print(f'{i}: {sent}')
| diff --git a/cltk/tests/test_nlp/test_tokenize.py b/cltk/tests/test_nlp/test_tokenize.py
--- a/cltk/tests/test_nlp/test_tokenize.py
+++ b/cltk/tests/test_nlp/test_tokenize.py
@@ -50,10 +50,16 @@ def test_sentence_tokenizer_latin_punkt(self):
'Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero!',
'Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem.'] # pylint: disable=line-too-long
tokenizer = LatinPunktSentenceTokenizer()
- print(tokenizer.models_path)
tokenized_sentences = tokenizer.tokenize(self.latin_text)
self.assertEqual(tokenized_sentences, target)
+ def test_sentence_tokenizer_latin_punkt_strict(self):
+ """Test tokenizing Latin sentences with stricter punctuation."""
+ target = ['in principio creavit Deus caelum et terram;', 'terra autem erat inanis et vacua et tenebrae super faciem abyssi et spiritus Dei ferebatur super aquas;', 'dixitque Deus fiat lux et facta est lux;', 'et vidit Deus lucem quod esset bona et divisit lucem ac tenebras.'] # pylint: disable=line-too-long
+ tokenizer = LatinPunktSentenceTokenizer(strict=True)
+ tokenized_sentences = tokenizer.tokenize("""in principio creavit Deus caelum et terram; terra autem erat inanis et vacua et tenebrae super faciem abyssi et spiritus Dei ferebatur super aquas; dixitque Deus fiat lux et facta est lux; et vidit Deus lucem quod esset bona et divisit lucem ac tenebras.""")
+ self.assertEqual(tokenized_sentences, target)
+
# Deprecated use cltk.tokenize.latin.sentence
def test_sentence_tokenizer_latin(self):
"""Test tokenizing Latin sentences."""
| Latin sentence tokenizer splits on limited punctuation
The current version of the Latin punkt sentence tokenizer only splits on ('.', '?', '!'). More flexibility is needed to account for editorial practice, digitization choices, etc.
re: editorial practice, cf. e.g. discussion in Pinkster, H. 2010. “Notes on the Language of Marcus Caelius Rufus.” In Dickey, E. and Chahoud, A. eds. Colloquial and Literary Latin. Cambridge: Cambridge University Press; page 195-196. https://books.google.com/books?id=iXlEtbSauCYC&pg=PA196#v=onepage&q&f=false.
re: digitization choices, cf.—at an extreme—the punctuation of the Tesserae version of the Vulgate (https://github.com/cltk/latin_text_tesserae/blob/master/texts/jerome.vulgate.part.1.genesis.tess), which only uses semicolons.
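A usage sketch of the `strict` option this PR introduces (it mirrors the test added above; the exact splits still depend on the trained Punkt model):
```python
from cltk.tokenize.latin.sentence import SentenceTokenizer

# strict=True adds '-', ':', ';' to the default '.', '?', '!' end characters
tokenizer = SentenceTokenizer(strict=True)
tokenizer.tokenize(
    "in principio creavit Deus caelum et terram; "
    "terra autem erat inanis et vacua;"
)
# expected: two sentences, each split off at the semicolon
```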
| 2019-10-06T17:02:51 |
|
cltk/cltk | 970 | cltk__cltk-970 | [
"969"
] | a0463a8b1b1a3d6663693405d3fcaa593829a7d9 | diff --git a/cltk/tokenize/latin_exceptions.py b/cltk/tokenize/latin_exceptions.py
--- a/cltk/tokenize/latin_exceptions.py
+++ b/cltk/tokenize/latin_exceptions.py
@@ -92,6 +92,223 @@
'non', 'numen', 'omen', 'orion', 'paean', 'pan', 'pelion', 'phaethon', 'python', 'quin', 'semen', 'sin',
'specimen', 'tamen', 'themin', 'titan']
+# Automatic from collatinus n_exceptions
+# an exceptions for -n from Collatinus Data
+n_exceptions += ['Acarnan', 'Aegipan', 'Alcman', 'Aman', 'Azan', 'Balaan', 'Balanan', 'Cainan', 'Chanaan', 'Chanan',
+ 'Euan', 'Euhan', 'Joathan', 'Johanan', 'Laban', 'Leviathan', 'Madian', 'Magedan', 'Naaman',
+ 'Nabuzardan', 'Nathan', 'Nisan', 'Pan', 'Pharan', 'Satan', 'Titan', 'dan', 'forsan', 'forsitan',
+ 'fortan', 'fortassean', 'man', 'paean', 'tragopan']
+# en exceptions for -n from Collatinus Data
+n_exceptions += ['Astarthen', 'Bethaven', 'Cebren', 'Cophen', 'Damen', 'Eden', 'Hellen', 'Manahen', 'Philopoemen',
+ 'Ruben', 'Siren', 'Troezen', 'Tychen', 'Zosimen', 'abdomen', 'abdumen', 'absegmen', 'acumen',
+ 'adaugmen', 'adfamen', 'adflamen', 'adhortamen', 'adjuvamen', 'adligamen', 'adnomen', 'aequamen',
+ 'aeramen', 'agmen', 'agnomen', 'albamen', 'albumen', 'almen', 'alumen', 'amen', 'amicimen', 'anguen',
+ 'arcumen', 'argyranchen', 'arsen', 'aspectamen', 'aspiramen', 'attagen', 'aucupiamen', 'augmen',
+ 'bitumen', 'cacumen', 'caelamen', 'calceamen', 'calciamen', 'cantamen', 'carmen', 'catillamen',
+ 'cavamen', 'certamen', 'chalasticamen', 'cicuticen', 'circen', 'citharicen', 'clinamen', 'cluden',
+ 'cogitamen', 'cognomen', 'columen', 'conamen', 'consolamen', 'contamen', 'conttamen', 'cornicen',
+ 'coronamen', 'coruscamen', 'crassamen', 'creamen', 'crimen', 'cruciamen', 'culmen', 'cunctamen',
+ 'curmen', 'curvamen', 'cyclamen', 'decoramen', 'detramen', 'dictamen', 'discrimen', 'docimen',
+ 'documen', 'dolamen', 'donamen', 'dulceamen', 'duramen', 'ebriamen', 'effamen', 'eliquamen',
+ 'epinomen', 'examen', 'excusamen', 'exhortamen', 'famen', 'farcimen', 'femen', 'ferrumen', 'ferumen',
+ 'fidamen', 'fidicen', 'figmen', 'filamen', 'firmamen', 'flamen', 'flemen', 'flumen', 'foramen',
+ 'formidamen', 'fragmen', 'frumen', 'frustramen', 'fulcimen', 'fulmen', 'fundamen', 'generamen',
+ 'genimen', 'germen', 'gestamen', 'glomeramen', 'gluten', 'gramen', 'gravamen', 'gubernamen', 'gumen',
+ 'hortamen', 'hymen', 'hyphen', 'imitamen', 'inchoamen', 'inflamen', 'inguen', 'inspiramen',
+ 'intercisimen', 'involumen', 'irritamen', 'juvamen', 'laetamen', 'lassamen', 'lateramen', 'legumen',
+ 'lenimen', 'levamen', 'libamen', 'libramen', 'lichen', 'lien', 'ligamen', 'lignamen', 'limen',
+ 'linamen', 'linimen', 'linteamen', 'liquamen', 'litamen', 'liticen', 'luctamen', 'lumen', 'lustramen',
+ 'lyricen', 'machinamen', 'manamen', 'medicamen', 'meditamen', 'miseramen', 'moderamen', 'modulamen',
+ 'molimen', 'momen', 'motamen', 'munimen', 'nemen', 'nodamen', 'nomen', 'notamen', 'novamen',
+ 'nullificamen', 'numen', 'nutamen', 'nutrimen', 'objectamen', 'oblectamen', 'oblenimen', 'occamen',
+ 'odoramen', 'oleamen', 'omen', 'ornamen', 'oscen', 'osmen', 'ostentamen', 'palpamen', 'peccamen',
+ 'pecten', 'pedamen', 'perflamen', 'petimen', 'piamen', 'pilumen', 'pinguamen', 'placamen', 'polimen',
+ 'pollen', 'postlimen', 'praecantamen', 'praeexercitamen', 'praefamen', 'praeligamen', 'praenomen',
+ 'praesegmen', 'precamen', 'proflamen', 'prolimen', 'pronomen', 'propagmen', 'psalmicen', 'pullamen',
+ 'pulpamen', 'purgamen', 'putamen', 'putramen', 'pyren', 'rasamen', 'refluamen', 'regimen', 'relevamen',
+ 'religamen', 'remoramen', 'ren', 'renovamen', 'resegmen', 'respiramen', 'revocamen', 'rogamen',
+ 'ructamen', 'rumen', 'saepimen', 'sagmen', 'salsamen', 'sanguen', 'sarcimen', 'sarmen', 'saturamen',
+ 'sedamen', 'sedimen', 'segmen', 'semen', 'sepimen', 'signamen', 'simulamen', 'sinuamen', 'siticen',
+ 'solamen', 'solen', 'solidamen', 'specimen', 'spectamen', 'speculamen', 'spiramen', 'splen',
+ 'spurcamen', 'sputamen', 'stabilimen', 'stamen', 'statumen', 'stipamen', 'stramen', 'sublimen',
+ 'substamen', 'substramen', 'subtegmen', 'suffimen', 'sufflamen', 'sulcamen', 'sumen', 'superlimen',
+ 'susurramen', 'synanchen', 'tamen', 'tegimen', 'tegmen', 'tegumen', 'temptamen', 'tentamen',
+ 'terebramen', 'termen', 'testamen', 'tibicen', 'tormen', 'tramen', 'tubicen', 'tumulamen', 'turben',
+ 'tutamen', 'ululamen', 'unguen', 'vegetamen', 'velamen', 'velumen', 'verumtamen', 'veruntamen',
+ 'vexamen', 'vibramen', 'vimen', 'vitreamen', 'vitulamen', 'vocamen', 'volumen']
+# in exceptions for -n from Collatinus Data
+n_exceptions += ['Arin', 'Attin', 'Benjamin', 'Cain', 'Corozain', 'Dothain', 'Eleusin', 'Eliacin', 'Engonasin',
+ 'Joachin', 'Seraphin', 'Trachin', 'Tubalcain', 'ain', 'alioquin', 'atquin', 'ceteroquin', 'cherubin',
+ 'delfin', 'delphin', 'hin', 'nostin', 'quin', 'satin', 'sin']
+# on exceptions for -n from Collatinus Data
+n_exceptions += ['Aaron', 'Abaddon', 'Abessalon', 'Abiron', 'Absalon', 'Accaron', 'Acheron', 'Achilleon', 'Acmon',
+ 'Acroathon', 'Actaeon', 'Adipson', 'Adon', 'Aeantion', 'Aegaeon', 'Aegilion', 'Aegion', 'Aegon',
+ 'Aemon', 'Aeson', 'Aethion', 'Aethon', 'Aetion', 'Agamemnon', 'Aglaophon', 'Ajalon', 'Alabastron',
+ 'Alabon', 'Albion', 'Alcimedon', 'Alcmaeon', 'Alcon', 'Alcumaeon', 'Alcyon', 'Alebion', 'Alemon',
+ 'Alexion', 'Aliacmon', 'Alison', 'Almon', 'Alymon', 'Amazon', 'Amithaon', 'Amithhaon', 'Ammon',
+ 'Amnon', 'Amorion', 'Amphictyon', 'Amphimedon', 'Amphion', 'Amphitryon', 'Amydon', 'Amythaon',
+ 'Amyzon', 'Anacreon', 'Anaon', 'Andraemon', 'Andremon', 'Androgeon', 'Androtion', 'Anticyricon',
+ 'Antiphon', 'Antron', 'Aon', 'Apion', 'Apollyon', 'Apteron', 'Arethon', 'Arion', 'Aristocreon',
+ 'Aristogiton', 'Ariston', 'Aristophon', 'Artacaeon', 'Arthedon', 'Asarhaddon', 'Asidon', 'Aspledon',
+ 'Astragon', 'Astron', 'Aulion', 'Auson', 'Automedon', 'Auximon', 'Avenion', 'Axion', 'Babylon',
+ 'Baeton', 'Barcinon', 'Batton', 'Bellerophon', 'Bethoron', 'Bion', 'Bithynion', 'Biton', 'Blascon',
+ 'Blepharon', 'Borion', 'Branchiadon', 'Brauron', 'Bronton', 'Bruchion', 'Bryalion', 'Bryazon',
+ 'Bryllion', 'Bubon', 'Bucion', 'Byzantion', 'Cacomnemon', 'Calcedon', 'Calchedon', 'Calliphon',
+ 'Callon', 'Calon', 'Calydon', 'Carchedon', 'Carnion', 'Caulon', 'Cedron', 'Celadon', 'Cerberion',
+ 'Cercyon', 'Ceron', 'Chaeremon', 'Chalaeon', 'Chalcedon', 'Chaon', 'Chardaleon', 'Charon', 'Cheraemon',
+ 'Chersiphron', 'Chilon', 'Chimerion', 'Chion', 'Chiron', 'Choerogylion', 'Cimon', 'Cisamon',
+ 'Cithaeron', 'Citheron', 'Claeon', 'Cleomedon', 'Cleon', 'Cleophon', 'Codrion', 'Colophon', 'Condylon',
+ 'Conon', 'Corragon', 'Corrhagon', 'Corydon', 'Cothon', 'Cotton', 'Cotyaion', 'Crannon', 'Cranon',
+ 'Cremmyon', 'Creon', 'Crialoon', 'Criumetopon', 'Cromyon', 'Ctesiphon', 'Cydon', 'Daedalion', 'Dagon',
+ 'Daiphron', 'Dalion', 'Damasichthon', 'Damon', 'Dareion', 'Deltoton', 'Demetrion', 'Demoleon',
+ 'Demophon', 'Demophoon', 'Deucalion', 'Dexon', 'Diaron', 'Didumaon', 'Didymaon', 'Didymeon',
+ 'Dindymon', 'Dinon', 'Diomedon', 'Dion', 'Diptychon', 'Dipylon', 'Dolichaon', 'Dolon', 'Dorion',
+ 'Doriscon', 'Dortigon', 'Dotion', 'Dracanon', 'Edon', 'Eetion', 'Eion', 'Electruon', 'Electryon',
+ 'Eluron', 'Emathion', 'Endymion', 'Enguion', 'Engyon', 'Eon', 'Ephron', 'Erineon', 'Erisichthon',
+ 'Erotopaegnion', 'Erysichthon', 'Esdrelon', 'Euagon', 'Euctemon', 'Eudaemon', 'Eudon', 'Euphorion',
+ 'Euphron', 'Euprosopon', 'Eurymedon', 'Eurytion', 'Gabaon', 'Gargaron', 'Gedeon', 'Gehon', 'Gelon',
+ 'Genethliacon', 'Geon', 'Georgicon', 'Gerrhon', 'Gerson', 'Geryon', 'Glycon', 'Gorgon', 'Gyrton',
+ 'Habron', 'Haemon', 'Hagnon', 'Haliacmon', 'Hammon', 'Hannon', 'Harmedon', 'Harpocration', 'Hebon',
+ 'Hebron', 'Helicaon', 'Helicon', 'Hephaestion', 'Hermacreon', 'Hesebon', 'Hexaemeron', 'Hexapylon',
+ 'Hicetaon', 'Hieron', 'Hilarion', 'Hippocoon', 'Hippomedon', 'Hippon', 'Holmon', 'Holon', 'Hygienon',
+ 'Hypaton', 'Hyperion', 'Iasion', 'Icadion', 'Icosion', 'Idmon', 'Ilion', 'Imaon', 'Iseon', 'Ixion',
+ 'Jason', 'Lacedaemon', 'Lacon', 'Lacydon', 'Ladon', 'Laestrygon', 'Lagon', 'Lampon', 'Laocoon',
+ 'Laomedon', 'Laucoon', 'Lauron', 'Lecton', 'Leocorion', 'Leon', 'Lepreon', 'Leprion', 'Lestrygon',
+ 'Lethon', 'Lilybaeon', 'Lycaon', 'Lycon', 'Lycophon', 'Lycophron', 'Lydion', 'Lyson', 'Macedon',
+ 'Machaon', 'Maeon', 'Maeson', 'Mageddon', 'Magon', 'Marathon', 'Marcion', 'Mathon', 'Medeon', 'Medon',
+ 'Memnon', 'Menephron', 'Menon', 'Mentonomon', 'Metagon', 'Methion', 'Metion', 'Meton', 'Micon',
+ 'Miction', 'Micton', 'Milanion', 'Milon', 'Mirsion', 'Mision', 'Mnason', 'Mnemon', 'Mnesigiton',
+ 'Molon', 'Mulon', 'Mycon', 'Mydon', 'Mygdon', 'Myrmidon', 'Naasson', 'Nahasson', 'Naron', 'Narycion',
+ 'Nasamon', 'Nebon', 'Neon', 'Nicephorion', 'Nicon', 'Noemon', 'Nomion', 'Oenopion', 'Olizon', 'Ophion',
+ 'Orchomenon', 'Orion', 'Oromedon', 'Ortiagon', 'Paeon', 'Palaemon', 'Pallon', 'Pandion', 'Panopion',
+ 'Pantaleon', 'Pantheon', 'Paphlagon', 'Paridon', 'Parion', 'Parmenion', 'Parthaon', 'Parthenion',
+ 'Parthenon', 'Passaron', 'Patron', 'Paulon', 'Pedon', 'Pelagon', 'Pelion', 'Pellaon', 'Pergamon',
+ 'Peteon', 'Phaedon', 'Phaenon', 'Phaethon', 'Phalerion', 'Phaleron', 'Phaon', 'Pharaon', 'Pharathon',
+ 'Phidon', 'Philammon', 'Philemon', 'Philistion', 'Philon', 'Phison', 'Phlegethon', 'Phlegon',
+ 'Phocion', 'Phradmon', 'Phryxelon', 'Physcon', 'Pion', 'Pitholeon', 'Pleuron', 'Pluton', 'Polemon',
+ 'Polydaemon', 'Polygiton', 'Polypemon', 'Polyperchon', 'Porphyrion', 'Prion', 'Procyon', 'Protagorion',
+ 'Protheon', 'Pseudostomon', 'Pteleon', 'Pygmalion', 'Pyracmon', 'Pyriphlegethon', 'Python', 'Region',
+ 'Rhinthon', 'Rhinton', 'Rhion', 'Rhizon', 'Rhoetion', 'Rhytion', 'Rubicon', 'Rumon', 'Salomon',
+ 'Samson', 'Sarion', 'Sarpedon', 'Sason', 'Satiricon', 'Satyricon', 'Sciron', 'Scyron', 'Sebeon',
+ 'Sicyon', 'Sidon', 'Sigalion', 'Silaniion', 'Silanion', 'Simeon', 'Simon', 'Sinon', 'Sisichthon',
+ 'Sisichton', 'Sithon', 'Socration', 'Solomon', 'Solon', 'Sophron', 'Spiridion', 'Stilbon', 'Stilpon',
+ 'Stimichon', 'Stimon', 'Stratioton', 'Straton', 'Strenion', 'Strongylion', 'Strymon', 'Sunion',
+ 'Taenaron', 'Tarracon', 'Tauron', 'Taygeton', 'Technopaegnion', 'Tecmon', 'Telamon', 'Telon',
+ 'Tenthredon', 'Teredon', 'Teuthredon', 'Thabusion', 'Thelbon', 'Themison', 'Theon', 'Thermodon',
+ 'Theromedon', 'Theron', 'Thesbon', 'Thronion', 'Thryon', 'Thylon', 'Timoleon', 'Timon', 'Topazion',
+ 'Topazon', 'Trallicon', 'Trevidon', 'Triton', 'Tritonon', 'Tryphon', 'Tylon', 'Typhon', 'Ucalegon',
+ 'Vibon', 'Vulchalion', 'Xenophon', 'Zabulon', 'Zenon', 'Zephyrion', 'Zon', 'Zopyrion', 'acanthion',
+ 'aconiton', 'acopon', 'acoron', 'acratophoron', 'acrochordon', 'acrocolion', 'acron', 'adamenon',
+ 'adipsatheon', 'aedon', 'aegolethron', 'aeon', 'aesalon', 'aeschrion', 'agaricon', 'agathodaemon',
+ 'ageraton', 'agon', 'agriophyllon', 'aizoon', 'alazon', 'alexipharmacon', 'allasson', 'alphiton',
+ 'alypon', 'alyseidion', 'alysidion', 'alysson', 'alyssson', 'amaracion', 'amerimnon', 'amethystizon',
+ 'ammonitron', 'amomon', 'ampeloprason', 'amphibion', 'anabibazon', 'anacoluthon', 'anagon',
+ 'anarrhinon', 'ancistron', 'ancon', 'ancyloblepharon', 'andron', 'androsaemon', 'annon', 'anodynon',
+ 'anteridion', 'anthedon', 'anthereon', 'anthyllion', 'antibiblion', 'antipharmacon', 'antirrhinon',
+ 'antiscorodon', 'antistrephon', 'antitheton', 'antizeugmenon', 'aphron', 'apiacon', 'apocynon',
+ 'apographon', 'apologeticon', 'apoproegmenon', 'aposcopeuon', 'arcebion', 'archebion', 'archidiacon',
+ 'archigeron', 'architecton', 'archon', 'arcion', 'arcoleon', 'arction', 'argemon', 'argemonion',
+ 'argennon', 'aristereon', 'armon', 'arnion', 'arnoglosson', 'aron', 'arrhenogonon', 'arsenogonon',
+ 'artemedion', 'artemon', 'arusion', 'asaron', 'asbeston', 'ascalon', 'asceterion', 'asclepion',
+ 'ascyron', 'asphaltion', 'aspideion', 'asplenon', 'asterion', 'astrabicon', 'astrion', 'asyndeton',
+ 'asyntrophon', 'athenogeron', 'athlon', 'atlantion', 'aulon', 'autochthon', 'autochton', 'automaton',
+ 'axon', 'azymon', 'barbiton', 'barypicron', 'barython', 'basilicon', 'batrachion', 'bechion', 'belion',
+ 'bisdiapason', 'bison', 'blachnon', 'blechhnon', 'blechon', 'blechron', 'bolbiton', 'botryon',
+ 'boustrophedon', 'brochon', 'bryon', 'bubalion', 'bubonion', 'buleuterion', 'bunion', 'bupleuron',
+ 'burrhinon', 'buselinon', 'bustrophedon', 'busycon', 'butyron', 'caballion', 'cacemphaton',
+ 'cacodaemon', 'cacophaton', 'cacosyntheton', 'cacozelon', 'caesapon', 'calligonon', 'callion',
+ 'callipetalon', 'callitrichon', 'calopodion', 'camelopodion', 'cammaron', 'canon', 'carcinethron',
+ 'carcinothron', 'carpophyllon', 'caryllon', 'caryon', 'caryophyllon', 'caryphyllon', 'cassiteron',
+ 'catalepton', 'causon', 'centaurion', 'cephalaeon', 'ceration', 'cerion', 'cestron', 'chaerephyllon',
+ 'chalazion', 'chalcanthon', 'chalcanton', 'chamaedracon', 'chamaeleon', 'chamaemelon', 'chamaezelon',
+ 'charisticon', 'charistion', 'chariton', 'charitonblepharon', 'chelidon', 'chelyon', 'chenoboscion',
+ 'chiliophyllon', 'chirographon', 'chironomon', 'chlorion', 'chondrillon', 'chreston', 'chrysallion',
+ 'chrysanthemon', 'cichorion', 'cinnamon', 'circaeon', 'cirsion', 'cissaron', 'cission', 'cleonicion',
+ 'cleopiceton', 'clidion', 'clinopodion', 'cneoron', 'cnestron', 'coacon', 'cobion', 'coenon',
+ 'colobathron', 'colon', 'comaron', 'contomonobolon', 'coriandron', 'corion', 'corisson', 'corymbion',
+ 'cotyledon', 'crataegon', 'crataeogonon', 'crinon', 'crocodileon', 'crocodilion', 'croton',
+ 'crysallion', 'crystallion', 'cuferion', 'cybion', 'cyceon', 'cyclaminon', 'cylon', 'cymation',
+ 'cynocardamon', 'cynocephalion', 'cynodon', 'cynomazon', 'cynomorion', 'cynorrhodon', 'cynorrodon',
+ 'cynozolon', 'cyperon', 'daemon', 'daimon', 'damasonion', 'daphnon', 'daucion', 'daucon', 'deleterion',
+ 'diaartymaton', 'diabotanon', 'diacerason', 'diacheton', 'diachyton', 'diacochlecon', 'diacodion',
+ 'diacon', 'diaglaucion', 'diagrydion', 'dialibanon', 'dialion', 'dialthaeon', 'dialyton',
+ 'diameliloton', 'diameliton', 'diamoron', 'diapanton', 'diapason', 'diaprasion', 'diascorodon',
+ 'diasmyrnon', 'diaspermaton', 'diatessaron', 'diatoichon', 'diatonicon', 'diazeugmenon', 'dichalcon',
+ 'dichomenion', 'diezeugmenon', 'digammon', 'diospyron', 'dircion', 'disdiapason', 'distichon',
+ 'dodecatemorion', 'dodecatheon', 'dorcadion', 'dorcidion', 'doron', 'dorycnion', 'dorypetron',
+ 'dracon', 'dracontion', 'dryophonon', 'dysprophoron', 'ebenotrichon', 'echeon', 'echion', 'ectomon',
+ 'egersimon', 'elaeon', 'elaphoboscon', 'elegeion', 'elegeon', 'elegidarion', 'elegidion', 'elegion',
+ 'eleison', 'embadon', 'emmoton', 'emplecton', 'enchiridion', 'enemion', 'engonaton', 'enhaemon',
+ 'enneaphyllon', 'epagon', 'ephedron', 'ephemeron', 'epicedion', 'epigrammation', 'epimedion',
+ 'epinicion', 'epipetron', 'epiradion', 'epitaphion', 'epithalamion', 'epitheton', 'epithymon',
+ 'epitonion', 'epomphalion', 'eranthemon', 'erigeron', 'erioxylon', 'eryngion', 'erysisceptron',
+ 'erythraicon', 'erythranon', 'eschatocollion', 'etymon', 'eubolion', 'eucharisticon', 'eugalacton',
+ 'eunuchion', 'euphrosynon', 'eupteron', 'eutheriston', 'euzomon', 'exacon', 'exonychon', 'exormiston',
+ 'galeobdolon', 'galion', 'gamelion', 'ganglion', 'garyophyllon', 'geranion', 'gethyon', 'gingidion',
+ 'glaucion', 'glechon', 'glinon', 'glycyrrhizon', 'gnaphalion', 'gnomon', 'gossipion', 'gossypion',
+ 'hadrobolon', 'haematicon', 'halcyon', 'halicacabon', 'halimon', 'halipleumon', 'halmyridion', 'halon',
+ 'hecatombion', 'hegemon', 'hegemonicon', 'heleoselinon', 'heliochryson', 'helioscopion',
+ 'helioselinon', 'heliotropion', 'hemerobion', 'hemionion', 'hemisphaerion', 'hemitonion', 'hepatizon',
+ 'heptaphyllon', 'heroion', 'heterocliton', 'hexaclinon', 'hexaphoron', 'hieracion', 'hieromnemon',
+ 'hippolapathon', 'hippomarathon', 'hippophaeston', 'hippopheon', 'hippophlomon', 'hipposelinon',
+ 'histon', 'hodoeporicon', 'holocyron', 'holosteon', 'homoeopropheron', 'homoeoprophoron',
+ 'homoeoptoton', 'homoeoteleuton', 'horaeon', 'horizon', 'hormiscion', 'hyacinthizon', 'hydrogeron',
+ 'hydrolapathon', 'hypecoon', 'hyperbaton', 'hypericon', 'hypocauston', 'hypogeson', 'hypoglottion',
+ 'hypomochlion', 'hypopodion', 'ichneumon', 'icon', 'idolon', 'ion', 'iphyon', 'ischaemon',
+ 'isocinnamon', 'isopleuron', 'isopyron', 'langon', 'larbason', 'ledon', 'leontocaron', 'leontopetalon',
+ 'leontopodion', 'leptophyllon', 'leucanthemon', 'leuceoron', 'leucoion', 'leucon', 'leucophoron',
+ 'leucrion', 'leuson', 'lexidion', 'libadion', 'lignyzon', 'limodoron', 'limonion', 'linostrophon',
+ 'lirinon', 'lirion', 'lithizon', 'lithognomon', 'lithospermon', 'logarion', 'longanon', 'lucmon',
+ 'lychnion', 'lyncurion', 'lyron', 'lytron', 'machaerophyllon', 'madon', 'maldacon', 'malobathron',
+ 'mammon', 'manicon', 'manon', 'margarition', 'maron', 'maronion', 'mastichelaeon', 'mazonomon',
+ 'mecon', 'meconion', 'medion', 'melamphyllon', 'melampodion', 'melanspermon', 'melanthion',
+ 'melaspermon', 'melissophyllon', 'meliton', 'melocarpon', 'melophyllon', 'melothron', 'memecylon',
+ 'menion', 'menogenion', 'mesanculon', 'metopion', 'metopon', 'metron', 'mileon', 'miuron',
+ 'mnemosynon', 'monazon', 'monemeron', 'monobolon', 'monochordon', 'monosyllabon', 'morion',
+ 'mormorion', 'myacanthon', 'myophonon', 'myosoton', 'myriophyllon', 'myrmecion', 'myron',
+ 'myrtopetalon', 'mystron', 'myxarion', 'myxon', 'nardostachyon', 'naulon', 'nechon', 'necnon',
+ 'nephelion', 'nerion', 'nession', 'neurospaston', 'nicephyllon', 'nitrion', 'non', 'notion',
+ 'nyctegreton', 'nymphon', 'nysion', 'octaphhoron', 'octaphoron', 'octophoron', 'ololygon',
+ 'onocardion', 'onochelon', 'onochilon', 'onopordon', 'onopradon', 'ophidion', 'ophiostaphylon',
+ 'opion', 'opition', 'opocarpathon', 'orchion', 'oreon', 'oreoselinon', 'orestion', 'origanon',
+ 'ornithon', 'orobethron', 'otion', 'oxybaphon', 'oxylapathon', 'oxytonon', 'oxytriphyllon',
+ 'panathenaicon', 'pancration', 'panion', 'paradoxon', 'paranarrhinon', 'paranatellon', 'parelion',
+ 'pareoron', 'parergon', 'parhelion', 'parhomoeon', 'parison', 'paromoeon', 'paronymon', 'parthenicon',
+ 'pausilypon', 'pedalion', 'peganon', 'pelecinon', 'pellion', 'pentadactylon', 'pentagonon',
+ 'pentaphyllon', 'pentaspaston', 'pentatomon', 'pentorobon', 'perichristarion', 'perinaeon', 'perineon',
+ 'periosteon', 'peripodion', 'perispomenon', 'perisson', 'peristereon', 'petroselinon', 'peucedanon',
+ 'phaenion', 'phaenomenon', 'phalaecion', 'phalangion', 'pharicon', 'pharnaceon', 'pharnacion',
+ 'phasganion', 'phellandrion', 'pheuxaspidion', 'philanthropion', 'phlegmon', 'phorimon', 'phrenion',
+ 'phryganion', 'phrynion', 'phyllon', 'phynon', 'physiognomon', 'pisselaeon', 'pitydion', 'pityon',
+ 'platanon', 'platon', 'platyphyllon', 'plethron', 'polion', 'polyandrion', 'polyarchion', 'polyarcyon',
+ 'polycnemon', 'polygonaton', 'polyneuron', 'polypodion', 'polyptoton', 'polyrrhizon', 'polyrrizon',
+ 'polyspaston', 'polysyntheton', 'polytrichon', 'poppyzon', 'potamogeton', 'potamogiton', 'poterion',
+ 'pramnion', 'prapedilon', 'prapedion', 'prasion', 'prason', 'proarchon', 'probation', 'procoeton',
+ 'procomion', 'proegmenon', 'prognosticon', 'promnion', 'pronaon', 'propempticon', 'propnigeon',
+ 'propylaeon', 'propylon', 'prosopon', 'protagion', 'protrepticon', 'protropon', 'pseudobunion',
+ 'pseudoselinon', 'psychotrophon', 'psyllion', 'pteron', 'pycnocomon', 'pyrethron', 'pythion',
+ 'pythonion', 'quilon', 'rhagion', 'rhapeion', 'rhaphanidion', 'rhigolethron', 'rhinion',
+ 'rhododendron', 'rhopalon', 'rhuselinon', 'rhythmizomenon', 'saccharon', 'sacon', 'sagapenon',
+ 'sagenon', 'sanchromaton', 'sangenon', 'saphon', 'sarcion', 'satyrion', 'saurion', 'scazon',
+ 'scimpodion', 'sciothericon', 'scolecion', 'scolibrochon', 'scolopendrion', 'scordilon', 'scordion',
+ 'scorodon', 'scorpioctonon', 'scorpion', 'scorpiuron', 'scybalon', 'selenion', 'selenogonon',
+ 'selinon', 'selinophyllon', 'semimetopion', 'semnion', 'sepioticon', 'serapion', 'setanion',
+ 'sicelicon', 'siderion', 'sindon', 'sion', 'siphon', 'sisonagrion', 'sisyrinchion', 'sisyringion',
+ 'smilion', 'smyrnion', 'sophismation', 'sparganion', 'sparton', 'spathalion', 'sphaerion', 'sphingion',
+ 'sphondylion', 'splenion', 'spondiazon', 'spondylion', 'stacton', 'staphylodendron', 'stasimon',
+ 'statioron', 'stergethron', 'stomation', 'stratopedon', 'struthion', 'subdiacon', 'sycaminon',
+ 'sycophyllon', 'symphyton', 'symposion', 'syndon', 'synemmenon', 'syngenicon', 'synoneton',
+ 'synonymon', 'syntonon', 'syntononon', 'syreon', 'syron', 'taurophthalmon', 'technyphion', 'telephion',
+ 'tenon', 'teramon', 'tetartemorion', 'tethalassomenon', 'tetrachordon', 'tetracolon', 'tetragnathion',
+ 'tetraptoton', 'tetrastichon', 'tetrastylon', 'teucrion', 'teuthrion', 'theatridion', 'thelycon',
+ 'thelygonon', 'thelyphonon', 'theobrotion', 'theodotion', 'theoremation', 'theribethron', 'therion',
+ 'theriophonon', 'thermospodion', 'thesion', 'thorybethron', 'thorybetron', 'thrauston', 'thrion',
+ 'thymion', 'thyon', 'tiphyon', 'tithymalon', 'tordylion', 'tordylon', 'toxicon', 'tragion',
+ 'tragopogon', 'trapezophoron', 'tribon', 'trichalcon', 'tricolon', 'trigon', 'trihemitonion',
+ 'tripolion', 'tryginon', 'trygon', 'typhonion', 'ulophonon', 'ulophyton', 'urion', 'xenon',
+ 'xeromyron', 'xeron', 'xiphion', 'xyliglycon', 'xylion', 'xylon', 'xylophyton', 'zacon',
+ 'zoophthalmon', 'zopyron', 'zopyrontion', 'zugon']
+
# English words; this list added to better handle English header, navigation, etc. in plaintext files of the Latin Library corpus.
n_exceptions += ['alcuin', 'caen', 'christian', 'chronicon', 'châtillon', 'claudian', 'john', 'justin', 'latin',
'lucan', 'martin', 'novatian', 'quintilian', 'roman', 'tertullian']
| Latin tokenizer tokenizes wrongly "bitumen" with "bitume" & "-ne"
Hi there,
When I was looking closer at the results of tokenization, I saw this magnificent feature:
```python
from cltk.tokenize.word import WordTokenizer
word_tokenizer = WordTokenizer('latin')
text = 'atque haec abuterque puerve paterne nihil'
text = "bellum bitumen"
word_tokenizer.tokenize(text)
>>> ['bellum', 'bitume', '-ne']
```
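For reference, the behaviour this PR is after once `bitumen` is covered by the `-n` exception list above (a sketch of the intent, not a captured run):
```python
from cltk.tokenize.word import WordTokenizer

word_tokenizer = WordTokenizer('latin')
word_tokenizer.tokenize("bellum bitumen")
# expected: ['bellum', 'bitumen'] (no longer split into 'bitume' + '-ne')
```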
| 2020-02-19T18:18:11 |
||
cltk/cltk | 1057 | cltk__cltk-1057 | [
"634"
] | 293e64cf960d0946a6edc9667a1fb3b038288fa8 | diff --git a/src/cltk/languages/pipelines.py b/src/cltk/languages/pipelines.py
--- a/src/cltk/languages/pipelines.py
+++ b/src/cltk/languages/pipelines.py
@@ -164,7 +164,7 @@ class CopticPipeline(Pipeline):
description: str = "Pipeline for the Coptic language"
language: Language = get_lang("cop")
processes: List[Type[Process]] = field(
- default_factory=lambda: [CopticStanzaProcess]
+ default_factory=lambda: [CopticStanzaProcess, StopsProcess]
)
diff --git a/src/cltk/stops/cop.py b/src/cltk/stops/cop.py
new file mode 100644
--- /dev/null
+++ b/src/cltk/stops/cop.py
@@ -0,0 +1,329 @@
+"""
+This list is adapted from https://github.com/computationalstylistics/tidystopwords
+which in turn is based on UniversalDependencies treebanks.
+"""
+
+STOPS = [
+ "ϣⲁ", # lemma: ϣⲁ, UD_pos: ADP
+ "ϣⲁⲣⲟ", # lemma: ϣⲁ, UD_pos: ADP
+ "ϣⲁⲣⲱ", # lemma: ϣⲁ, UD_pos: ADP
+ "ϩⲁ", # lemma: ϩⲁ, UD_pos: ADP
+ "ϩⲁϩⲧⲏ", # lemma: ϩⲁϩⲧⲛ, UD_pos: ADP
+ "ϩⲁⲣⲁⲧ", # lemma: ϩⲁⲣⲁⲧ, UD_pos: ADP
+ "ϩⲁⲣⲟ", # lemma: ϩⲁ, UD_pos: ADP
+ "ϩⲁⲣⲱ", # lemma: ϩⲁ, UD_pos: ADP
+ "ϩⲁⲧⲉ", # lemma: ϩⲁⲧⲉ, UD_pos: ADP
+ "ϩⲁⲧⲉ", # lemma: ϩⲁϩⲧⲛ, UD_pos: ADP
+ "ϩⲁⲧⲛ", # lemma: ϩⲁⲧⲛ, UD_pos: ADP
+ "ϩⲓ", # lemma: ϩⲓ, UD_pos: ADP
+ "ϩⲓϫⲙ", # lemma: ϩⲓϫⲛ, UD_pos: ADP
+ "ϩⲓϫⲛ", # lemma: ϩⲓϫⲛ, UD_pos: ADP
+ "ϩⲓϫⲱ", # lemma: ϩⲓϫⲛ, UD_pos: ADP
+ "ϩⲓⲣⲙ", # lemma: ϩⲓⲣⲛ, UD_pos: ADP
+ "ϩⲓⲧⲙ", # lemma: ϩⲓⲧⲛ, UD_pos: ADP
+ "ϩⲓⲧⲛ", # lemma: ϩⲓⲧⲛ, UD_pos: ADP
+ "ϩⲓⲧⲟⲟⲧ", # lemma: ϩⲓⲧⲟⲟⲧ, UD_pos: ADP
+ "ϩⲓⲧⲟⲟⲧ", # lemma: ϩⲓⲧⲛ, UD_pos: ADP
+ "ϩⲓⲧⲟⲩⲱ", # lemma: ϩⲓⲧⲟⲩⲛ, UD_pos: ADP
+ "ϩⲓⲱ", # lemma: ϩⲓ, UD_pos: ADP
+ "ϩⲓⲱⲱ", # lemma: ϩⲓ, UD_pos: ADP
+ "ϩⲙ", # lemma: ϩⲛ, UD_pos: ADP
+ "ϩⲙ", # lemma: ϩⲙ, UD_pos: ADP
+ "ϩⲛ", # lemma: ϩⲛ, UD_pos: ADP
+ "ϫⲓⲛ", # lemma: ϫⲓⲛ, UD_pos: ADP
+ "ⲁϫⲛ", # lemma: ⲁϫⲛ, UD_pos: ADP
+ "ⲉ", # lemma: ⲉ, UD_pos: ADP
+ "ⲉ", # lemma: ⲉⲣⲉ, UD_pos: ADP
+ "ⲉ", # lemma: ⲉⲣⲉ_ⲛⲧⲟⲟⲩ, UD_pos: ADP
+ "ⲉϩⲟⲩⲉ", # lemma: ⲉϩⲟⲩⲉ, UD_pos: ADP
+ "ⲉϫⲙ", # lemma: ⲉϫⲛ, UD_pos: ADP
+ "ⲉϫⲛ", # lemma: ⲉϫⲛ, UD_pos: ADP
+ "ⲉϫⲱ", # lemma: ⲉϫⲛ, UD_pos: ADP
+ "ⲉⲣⲁⲧ", # lemma: ⲉⲣⲁⲧ, UD_pos: ADP
+ "ⲉⲣⲟ", # lemma: ⲉ, UD_pos: ADP
+ "ⲉⲣⲱ", # lemma: ⲉ, UD_pos: ADP
+ "ⲉⲧⲃⲉ", # lemma: ⲉⲧⲃⲉ, UD_pos: ADP
+ "ⲉⲧⲃⲏⲏⲧ", # lemma: ⲉⲧⲃⲉ, UD_pos: ADP
+ "ⲉⲧⲟⲟⲧ", # lemma: ⲉⲧⲛ, UD_pos: ADP
+ "ⲕⲁⲧⲁ", # lemma: ⲕⲁⲧⲁ, UD_pos: ADP
+ "ⲙ", # lemma: ⲛ, UD_pos: ADP
+ "ⲙ", # lemma: ⲙ, UD_pos: ADP
+ "ⲙⲙⲟ", # lemma: ⲛ, UD_pos: ADP
+ "ⲙⲙⲟ", # lemma: ⲙⲙⲟ, UD_pos: ADP
+ "ⲙⲙⲱ", # lemma: ⲛ, UD_pos: ADP
+ "ⲙⲛ", # lemma: ⲙⲛ, UD_pos: ADP
+ "ⲙⲛⲛⲥⲁ", # lemma: ⲙⲛⲛⲥⲁ, UD_pos: ADP
+ "ⲙⲛⲛⲥⲱ", # lemma: ⲙⲛⲛⲥⲁ, UD_pos: ADP
+ "ⲛ", # lemma: ⲛ, UD_pos: ADP
+ "ⲛ", # lemma: ⲙ, UD_pos: ADP
+ "ⲛ", # lemma: ⲡ, UD_pos: ADP
+ "ⲛ", # lemma: ⲙ̄, UD_pos: ADP
+ "ⲛ", # lemma: ⲛⲁ, UD_pos: ADP
+ "ⲛϩⲏⲧ", # lemma: ϩⲛ, UD_pos: ADP
+ "ⲛϩⲏⲧ", # lemma: ϩⲏⲧ, UD_pos: ADP
+ "ⲛϩⲏⲧ", # lemma: ⲛϩⲏⲧ, UD_pos: ADP
+ "ⲛⲁ", # lemma: ⲛⲁ, UD_pos: ADP
+ "ⲛⲁ", # lemma: ⲙ, UD_pos: ADP
+ "ⲛⲁ", # lemma: ⲛ, UD_pos: ADP
+ "ⲛⲁϩⲣⲁ", # lemma: ⲛⲁϩⲣⲛ, UD_pos: ADP
+ "ⲛⲏ", # lemma: ⲛⲁ, UD_pos: ADP
+ "ⲛⲙ", # lemma: ⲙⲛ, UD_pos: ADP
+ "ⲛⲙⲙ", # lemma: ⲙⲛ, UD_pos: ADP
+ "ⲛⲙⲙ", # lemma: ⲛⲙⲙ, UD_pos: ADP
+ "ⲛⲙⲙⲁ", # lemma: ⲙⲛ, UD_pos: ADP
+ "ⲛⲙⲙⲏ", # lemma: ⲙⲛ, UD_pos: ADP
+ "ⲛⲛⲁϩⲣⲙ", # lemma: ⲛⲛⲁϩⲣⲛ, UD_pos: ADP
+ "ⲛⲛⲁϩⲣⲙ", # lemma: ⲛⲁϩⲣⲛ, UD_pos: ADP
+ "ⲛⲛⲁϩⲣⲛ", # lemma: ⲛⲛⲁϩⲣⲛ, UD_pos: ADP
+ "ⲛⲛⲁϩⲣⲛ", # lemma: ⲛⲁϩⲣⲛ, UD_pos: ADP
+ "ⲛⲥⲁ", # lemma: ⲛⲥⲁ, UD_pos: ADP
+ "ⲛⲥⲱ", # lemma: ⲛⲥⲁ, UD_pos: ADP
+ "ⲛⲧⲉ", # lemma: ⲛⲧⲉ, UD_pos: ADP
+ "ⲛⲧⲟⲟⲧ", # lemma: ⲛⲧⲛ, UD_pos: ADP
+ "ⲟⲩⲃⲉ", # lemma: ⲟⲩⲃⲉ, UD_pos: ADP
+ "ⲟⲩⲃⲏ", # lemma: ⲟⲩⲃⲉ, UD_pos: ADP
+ "ⲡⲁⲣⲁ", # lemma: ⲡⲁⲣⲁ, UD_pos: ADP
+ "ⲧⲟⲟⲧ", # lemma: ⲧⲟⲟⲧ, UD_pos: ADP
+ "ⲭⲱⲣⲓⲥ", # lemma: ⲭⲱⲣⲓⲥ, UD_pos: ADP
+ "ϣ", # lemma: ϣ, UD_pos: AUX
+ "ϣⲁ", # lemma: ϣⲁⲣⲉ, UD_pos: AUX
+ "ϣⲁ", # lemma: ϣⲁ, UD_pos: AUX
+ "ϣⲁⲛⲧ", # lemma: ϣⲁⲛⲧⲉ, UD_pos: AUX
+ "ϣⲁⲛⲧⲉ", # lemma: ϣⲁⲛⲧⲉ, UD_pos: AUX
+ "ϣⲁⲣⲉ", # lemma: ϣⲁⲣⲉ, UD_pos: AUX
+ "ⲁ", # lemma: ⲁ, UD_pos: AUX
+ "ⲁ", # lemma: ⲛⲁ, UD_pos: AUX
+ "ⲉϣ", # lemma: ϣ, UD_pos: AUX
+ "ⲉϣ", # lemma: ⲉϣ, UD_pos: AUX
+ "ⲉⲣϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ, UD_pos: AUX
+ "ⲉⲣⲉ", # lemma: ⲉⲣⲉ, UD_pos: AUX
+ "ⲙⲁ", # lemma: ⲙⲉⲣⲉ, UD_pos: AUX
+ "ⲙⲁ", # lemma: ⲙⲉ, UD_pos: AUX
+ "ⲙⲁⲣ", # lemma: ⲙⲁⲣⲉ, UD_pos: AUX
+ "ⲙⲁⲣⲉ", # lemma: ⲙⲁⲣⲉ, UD_pos: AUX
+ "ⲙⲉ", # lemma: ⲙⲉⲣⲉ, UD_pos: AUX
+ "ⲙⲉⲣⲉ", # lemma: ⲙⲉ, UD_pos: AUX
+ "ⲙⲙⲛ", # lemma: ⲙⲛ, UD_pos: AUX
+ "ⲙⲛ", # lemma: ⲙⲛ, UD_pos: AUX
+ "ⲙⲡ", # lemma: ⲙⲡⲉ, UD_pos: AUX
+ "ⲙⲡ", # lemma: ⲙⲡ, UD_pos: AUX
+ "ⲙⲡⲁⲧ", # lemma: ⲙⲡⲁⲧⲉ, UD_pos: AUX
+ "ⲙⲡⲁⲧⲉ", # lemma: ⲙⲡⲁⲧⲉ, UD_pos: AUX
+ "ⲙⲡⲉ", # lemma: ⲙⲡⲉ, UD_pos: AUX
+ "ⲙⲡⲣⲧⲣⲉ", # lemma: ⲙⲡⲣⲧⲣⲉ, UD_pos: AUX
+ "ⲛ", # lemma: ⲛⲧⲉ, UD_pos: AUX
+ "ⲛⲁ", # lemma: ⲛⲁ, UD_pos: AUX
+ "ⲛⲉ", # lemma: ⲛⲉⲣⲉ, UD_pos: AUX
+ "ⲛⲉ", # lemma: ⲛⲉ, UD_pos: AUX
+ "ⲛⲉⲣⲉ", # lemma: ⲛⲉⲣⲉ, UD_pos: AUX
+ "ⲛⲛ", # lemma: ⲛⲛⲉ, UD_pos: AUX
+ "ⲛⲛⲉ", # lemma: ⲛⲛⲉ, UD_pos: AUX
+ "ⲛⲧⲉ", # lemma: ⲛⲧⲉ, UD_pos: AUX
+ "ⲛⲧⲉⲣ", # lemma: ⲛⲧⲉⲣⲉ, UD_pos: AUX
+ "ⲛⲧⲉⲣⲉ", # lemma: ⲛⲧⲉⲣⲉ, UD_pos: AUX
+ "ⲟⲩⲛ", # lemma: ⲟⲩⲛ, UD_pos: AUX
+ "ⲧⲁⲣ", # lemma: ⲧⲁⲣ, UD_pos: AUX
+ "ⲧⲁⲣⲉ", # lemma: ⲧⲁⲣⲉ, UD_pos: AUX
+ "ⲩⲛ", # lemma: ⲟⲩⲛ, UD_pos: AUX
+ "ϩⲟⲧⲁⲛ", # lemma: ϩⲟⲧⲁⲛ, UD_pos: CCONJ
+ "ϩⲱⲥ", # lemma: ϩⲱⲥ, UD_pos: CCONJ
+ "ϩⲱⲥⲧⲉ", # lemma: ϩⲱⲥⲧⲉ, UD_pos: CCONJ
+ "ϫⲉ", # lemma: ϫⲉ, UD_pos: CCONJ
+ "ϫⲉⲕⲁⲁⲥ", # lemma: ϫⲉⲕⲁⲁⲥ, UD_pos: CCONJ
+ "ϫⲉⲕⲁⲁⲥ", # lemma: ϫⲉⲕⲁⲥ, UD_pos: CCONJ
+ "ϫⲉⲕⲁⲥ", # lemma: ϫⲉⲕⲁⲥ, UD_pos: CCONJ
+ "ϫⲉⲕⲁⲥ", # lemma: ϫⲉⲕⲁⲁⲥ, UD_pos: CCONJ
+ "ϫⲓ", # lemma: ϫⲓ, UD_pos: CCONJ
+ "ϫⲓⲛ", # lemma: ϫⲓⲛ, UD_pos: CCONJ
+ "ϫⲛ", # lemma: ϫⲛ, UD_pos: CCONJ
+ "ⲁⲗⲗⲁ", # lemma: ⲁⲗⲗⲁ, UD_pos: CCONJ
+ "ⲁⲩⲱ", # lemma: ⲁⲩⲱ, UD_pos: CCONJ
+ "ⲉϣϫⲉ", # lemma: ⲉϣϫⲉ, UD_pos: CCONJ
+ "ⲉϣⲱⲡⲉ", # lemma: ⲉϣⲱⲡⲉ, UD_pos: CCONJ
+ "ⲉⲓⲉ", # lemma: ⲉⲓⲉ, UD_pos: CCONJ
+ "ⲉⲓⲙⲏⲧⲓ", # lemma: ⲉⲓⲙⲏⲧⲓ, UD_pos: CCONJ
+ "ⲉⲓⲧⲉ", # lemma: ⲉⲓⲧⲉ, UD_pos: CCONJ
+ "ⲉⲛⲉ", # lemma: ⲉⲛⲉ, UD_pos: CCONJ
+ "ⲉⲡⲉⲓⲇⲏ", # lemma: ⲉⲡⲉⲓⲇⲏ, UD_pos: CCONJ
+ "ⲏ", # lemma: ⲏ, UD_pos: CCONJ
+ "ⲕⲁⲓ", # lemma: ⲕⲁⲓ, UD_pos: CCONJ
+ "ⲕⲁⲛ", # lemma: ⲕⲁⲛ, UD_pos: CCONJ
+ "ⲙⲉⲛ", # lemma: ⲙⲉⲛ, UD_pos: CCONJ
+ "ⲙⲏ", # lemma: ⲙⲏ, UD_pos: CCONJ
+ "ⲙⲏⲡⲟⲧⲉ", # lemma: ⲙⲏⲡⲟⲧⲉ, UD_pos: CCONJ
+ "ⲙⲏⲧⲓ", # lemma: ⲙⲏⲧⲓ, UD_pos: CCONJ
+ "ⲙⲙⲟⲛ", # lemma: ⲙⲙⲟⲛ, UD_pos: CCONJ
+ "ⲟⲩⲇⲉ", # lemma: ⲟⲩⲇⲉ, UD_pos: CCONJ
+ "ⲟⲩⲧⲉ", # lemma: ⲟⲩⲧⲉ, UD_pos: CCONJ
+ "ⲡⲗⲏⲛ", # lemma: ⲡⲗⲏⲛ, UD_pos: CCONJ
+ "ϩⲉⲛ", # lemma: ⲟⲩ, UD_pos: DET
+ "ϩⲛ", # lemma: ϩⲛ, UD_pos: DET
+ "ϭⲉ", # lemma: ϭⲉ, UD_pos: DET
+ "ϯ", # lemma: ⲡⲓ, UD_pos: DET
+ "ϯ", # lemma: ϯ, UD_pos: DET
+ "ⲕⲉ", # lemma: ⲕⲉ, UD_pos: DET
+ "ⲙ", # lemma: ⲡ, UD_pos: DET
+ "ⲙ", # lemma: ⲛ, UD_pos: DET
+ "ⲛ", # lemma: ⲡ, UD_pos: DET
+ "ⲛ", # lemma: ⲛ, UD_pos: DET
+ "ⲛⲁ", # lemma: ⲡⲁ, UD_pos: DET
+ "ⲛⲁ", # lemma: ⲡⲉ, UD_pos: DET
+ "ⲛⲁ", # lemma: ⲛⲁ, UD_pos: DET
+ "ⲛⲁⲓ", # lemma: ⲡⲁⲓ, UD_pos: DET
+ "ⲛⲁⲓ", # lemma: ⲛⲁⲓ, UD_pos: DET
+ "ⲛⲉ", # lemma: ⲡ, UD_pos: DET
+ "ⲛⲉ", # lemma: ⲛⲉⲣⲉ, UD_pos: DET
+ "ⲛⲉ", # lemma: ⲛ, UD_pos: DET
+ "ⲛⲉϥ", # lemma: ⲡⲉϥ, UD_pos: DET
+ "ⲛⲉⲓ", # lemma: ⲡⲉⲓ, UD_pos: DET
+ "ⲛⲉⲕ", # lemma: ⲡⲉⲕ, UD_pos: DET
+ "ⲛⲉⲛ", # lemma: ⲡⲉⲛ, UD_pos: DET
+ "ⲛⲉⲥ", # lemma: ⲡⲉⲥ, UD_pos: DET
+ "ⲛⲉⲧⲛ", # lemma: ⲡⲉⲧⲛ, UD_pos: DET
+ "ⲛⲉⲩ", # lemma: ⲡⲉⲩ, UD_pos: DET
+ "ⲛⲏ", # lemma: ⲡⲏ, UD_pos: DET
+ "ⲛⲓ", # lemma: ⲡⲓ, UD_pos: DET
+ "ⲛⲟⲩ", # lemma: ⲡⲟⲩ, UD_pos: DET
+ "ⲟⲩ", # lemma: ⲟⲩ, UD_pos: DET
+ "ⲟⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: DET
+ "ⲡ", # lemma: ⲡ, UD_pos: DET
+ "ⲡⲁ", # lemma: ⲡⲁ, UD_pos: DET
+ "ⲡⲁⲓ", # lemma: ⲡⲁⲓ, UD_pos: DET
+ "ⲡⲉ", # lemma: ⲡ, UD_pos: DET
+ "ⲡⲉ", # lemma: ⲡⲉ, UD_pos: DET
+ "ⲡⲉϥ", # lemma: ⲡⲉϥ, UD_pos: DET
+ "ⲡⲉϥ", # lemma: ⲡ, UD_pos: DET
+ "ⲡⲉⲓ", # lemma: ⲡⲉⲓ, UD_pos: DET
+ "ⲡⲉⲕ", # lemma: ⲡⲉⲕ, UD_pos: DET
+ "ⲡⲉⲛ", # lemma: ⲡⲉⲛ, UD_pos: DET
+ "ⲡⲉⲥ", # lemma: ⲡⲉⲥ, UD_pos: DET
+ "ⲡⲉⲧⲛ", # lemma: ⲡⲉⲧⲛ, UD_pos: DET
+ "ⲡⲉⲩ", # lemma: ⲡⲉⲩ, UD_pos: DET
+ "ⲡⲏ", # lemma: ⲡⲏ, UD_pos: DET
+ "ⲡⲓ", # lemma: ⲡⲓ, UD_pos: DET
+ "ⲡⲓ", # lemma: ⲡⲉⲓ, UD_pos: DET
+ "ⲡⲟⲩ", # lemma: ⲡⲟⲩ, UD_pos: DET
+ "ⲡⲱⲕ", # lemma: ⲡⲱⲕ, UD_pos: DET
+ "ⲡⲱⲧⲛ", # lemma: ⲡⲱⲧⲛ, UD_pos: DET
+ "ⲧ", # lemma: ⲡ, UD_pos: DET
+ "ⲧ", # lemma: ⲧ, UD_pos: DET
+ "ⲧⲁ", # lemma: ⲡⲁ, UD_pos: DET
+ "ⲧⲁⲓ", # lemma: ⲡⲁⲓ, UD_pos: DET
+ "ⲧⲉ", # lemma: ⲡ, UD_pos: DET
+ "ⲧⲉϥ", # lemma: ⲡⲉϥ, UD_pos: DET
+ "ⲧⲉⲓ", # lemma: ⲡⲉⲓ, UD_pos: DET
+ "ⲧⲉⲕ", # lemma: ⲡⲉⲕ, UD_pos: DET
+ "ⲧⲉⲛ", # lemma: ⲡⲉⲛ, UD_pos: DET
+ "ⲧⲉⲥ", # lemma: ⲡⲉⲥ, UD_pos: DET
+ "ⲧⲉⲧⲛ", # lemma: ⲡⲉⲧⲛ, UD_pos: DET
+ "ⲧⲉⲩ", # lemma: ⲡⲉⲩ, UD_pos: DET
+ "ⲧⲟⲩ", # lemma: ⲡⲟⲩ, UD_pos: DET
+ "ⲩ", # lemma: ⲟⲩ, UD_pos: DET
+ "ⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: DET
+ "ϥ", # lemma: ⲛⲧⲟϥ, UD_pos: PRON
+ "ϩⲱ", # lemma: ϩⲱⲱ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ϯ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ϯ", # lemma: ϯ, UD_pos: PRON
+ "ⲁ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲁ", # lemma: ⲁ, UD_pos: PRON
+ "ⲁ", # lemma: ⲛⲧⲟ, UD_pos: PRON
+ "ⲁϣ", # lemma: ⲁϣ, UD_pos: PRON
+ "ⲁⲛⲅ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲁⲛⲟⲕ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲁⲛⲟⲛ", # lemma: ⲁⲛⲟⲛ, UD_pos: PRON
+ "ⲁⲟⲩⲏⲣ", # lemma: ⲁⲟⲩⲏⲣ, UD_pos: PRON
+ "ⲁⲣ", # lemma: ⲁ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲅ", # lemma: ⲛⲧⲟⲕ, UD_pos: PRON
+ "ⲅ", # lemma: ⲅ, UD_pos: PRON
+ "ⲉ", # lemma: ⲛⲧⲟ, UD_pos: PRON
+ "ⲉϥ", # lemma: ⲉϥ, UD_pos: PRON
+ "ⲉϥϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲛⲧⲟϥ, UD_pos: PRON
+ "ⲉϥⲉ", # lemma: ⲉⲣⲉ_ⲛⲧⲟϥ, UD_pos: PRON
+ "ⲉⲓ", # lemma: ⲉⲓ, UD_pos: PRON
+ "ⲉⲓ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲉⲓϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲉⲓⲉ", # lemma: ⲉⲣⲉ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲉⲕϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲛⲧⲟⲕ, UD_pos: PRON
+ "ⲉⲕⲉ", # lemma: ⲉⲕⲉ, UD_pos: PRON
+ "ⲉⲛϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲁⲛⲟⲛ, UD_pos: PRON
+ "ⲉⲛⲉ", # lemma: ⲉⲛⲉ, UD_pos: PRON
+ "ⲉⲛⲉ", # lemma: ⲉⲣⲉ_ⲁⲛⲟⲛ, UD_pos: PRON
+ "ⲉⲣ", # lemma: ⲉⲣⲉ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲉⲣⲉ", # lemma: ⲉⲣⲉ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲉⲣⲉ", # lemma: ⲉⲣⲉ, UD_pos: PRON
+ "ⲉⲣⲟ", # lemma: ⲉ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲉⲥ", # lemma: ⲉⲥ, UD_pos: PRON
+ "ⲉⲧⲉⲧⲛϣⲁⲛ", # lemma: ⲉⲧⲉⲧⲛϣⲁⲛ, UD_pos: PRON
+ "ⲉⲧⲉⲧⲛϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲉⲧⲉⲧⲛⲉ", # lemma: ⲉⲣⲉ_ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲉⲩϣⲁⲛ", # lemma: ⲉⲣϣⲁⲛ_ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲉⲩⲉ", # lemma: ⲉⲣⲉ_ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲓ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲕ", # lemma: ⲛⲧⲟⲕ, UD_pos: PRON
+ "ⲕ", # lemma: ⲕ, UD_pos: PRON
+ "ⲕ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲙⲙⲟ", # lemma: ⲛ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲛ", # lemma: ⲁⲛⲟⲛ, UD_pos: PRON
+ "ⲛ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲛ", # lemma: ⲡ, UD_pos: PRON
+ "ⲛ", # lemma: ⲛ, UD_pos: PRON
+ "ⲛ", # lemma: ⲛⲧⲉ, UD_pos: PRON
+ "ⲛϩⲏⲧ", # lemma: ϩⲛ, UD_pos: PRON
+ "ⲛϩⲏⲧ", # lemma: ϩⲛ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲛⲉ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲛⲉⲣ", # lemma: ⲛⲉⲣⲉ_ⲛⲧⲟ, UD_pos: PRON
+ "ⲛⲓⲙ", # lemma: ⲛⲓⲙ, UD_pos: PRON
+ "ⲛⲧⲉⲧⲛ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲛⲧⲉⲧⲛ", # lemma: ⲛⲧⲉⲧⲛ, UD_pos: PRON
+ "ⲛⲧⲕ", # lemma: ⲛⲧⲟⲕ, UD_pos: PRON
+ "ⲛⲧⲟ", # lemma: ⲛⲧⲟ, UD_pos: PRON
+ "ⲛⲧⲟϥ", # lemma: ⲛⲧⲟϥ, UD_pos: PRON
+ "ⲛⲧⲟⲕ", # lemma: ⲛⲧⲟⲕ, UD_pos: PRON
+ "ⲛⲧⲟⲟⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲛⲧⲟⲥ", # lemma: ⲛⲧⲟⲥ, UD_pos: PRON
+ "ⲛⲧⲱⲧⲛ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲟⲩ", # lemma: ⲟⲩ, UD_pos: PRON
+ "ⲟⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲟⲩⲏⲣ", # lemma: ⲟⲩⲏⲣ, UD_pos: PRON
+ "ⲡ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲡ", # lemma: ⲡ, UD_pos: PRON
+ "ⲡⲉ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲣⲁⲧ", # lemma: ⲣⲁⲧ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲣⲱ", # lemma: ⲣⲟ, UD_pos: PRON
+ "ⲥ", # lemma: ⲛⲧⲟⲥ, UD_pos: PRON
+ "ⲥϥ", # lemma: ⲛⲧⲟϥ, UD_pos: PRON
+ "ⲥϥ", # lemma: ⲥϥ, UD_pos: PRON
+ "ⲥⲉ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲥⲉ", # lemma: ⲥⲉ, UD_pos: PRON
+ "ⲥⲟⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲧ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲧ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲧⲁ", # lemma: ⲛⲧⲉ_ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲧⲁ", # lemma: ⲁⲛⲟⲕ, UD_pos: PRON
+ "ⲧⲁ", # lemma: ⲁⲛⲟⲕ_ⲛⲧⲉ, UD_pos: PRON
+ "ⲧⲁ", # lemma: ⲡⲁ, UD_pos: PRON
+ "ⲧⲉ", # lemma: ⲡⲉ, UD_pos: PRON
+ "ⲧⲉ", # lemma: ⲛⲧⲟ, UD_pos: PRON
+ "ⲧⲉⲧ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲧⲉⲧ", # lemma: ⲧⲉⲧ, UD_pos: PRON
+ "ⲧⲉⲧⲛ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲧⲉⲧⲛ", # lemma: ⲧⲉⲧⲛ, UD_pos: PRON
+ "ⲧⲏⲩⲧⲛ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲧⲛ", # lemma: ⲁⲛⲟⲛ, UD_pos: PRON
+ "ⲧⲛ", # lemma: ⲛⲧⲱⲧⲛ, UD_pos: PRON
+ "ⲧⲱⲛ", # lemma: ⲧⲱⲛ, UD_pos: PRON
+ "ⲩ", # lemma: ⲛⲧⲟⲟⲩ, UD_pos: PRON
+ "ⲉ", # lemma: ⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉ", # lemma: ⲉ, UD_pos: SCONJ
+ "ⲉ", # lemma: ⲉⲧⲉ, UD_pos: SCONJ
+ "ⲉⲛⲧ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉⲣⲉ", # lemma: ⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉⲧ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉⲧ", # lemma: ⲉⲧ, UD_pos: SCONJ
+ "ⲉⲧ", # lemma: ⲉⲧⲉ, UD_pos: SCONJ
+ "ⲉⲧⲉ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+ "ⲉⲧⲉ", # lemma: ⲉⲧⲉ, UD_pos: SCONJ
+ "ⲉⲧⲉⲣⲉ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+ "ⲛⲧ", # lemma: ⲉⲧⲉⲣⲉ, UD_pos: SCONJ
+]
diff --git a/src/cltk/stops/words.py b/src/cltk/stops/words.py
--- a/src/cltk/stops/words.py
+++ b/src/cltk/stops/words.py
@@ -6,13 +6,14 @@
from typing import List
from cltk.languages.utils import get_lang
-from cltk.stops import (akk, ang, arb, enm, fro, gmh, grc, hin, lat, non, omr,
+from cltk.stops import (akk, ang, arb, cop, enm, fro, gmh, grc, hin, lat, non, omr,
pan, san)
MAP_ISO_TO_MODULE = dict(
akk=akk,
ang=ang,
arb=arb,
+ cop=cop,
enm=enm,
fro=fro,
gmh=gmh,
| Add Coptic stoplist?
Coptic does not yet have a language-specific submodule in cltk.stop: https://github.com/cltk/cltk/tree/master/cltk/stop
| I shall take this.
Hi,
I found this repo https://github.com/cinkova/stopwoRds
It supports Coptic as well as many other languages.
Is this a good seed stopwords list to start with?
Interesting find @AMR-KELEG . Are you interested in doing this one yourself? It should be pretty easy, plus you could learn about our upcoming release.
Steps:
- Make a new file at `dev/src/cltk/stops/.cop.py` ([here](https://github.com/cltk/cltk/tree/dev/src/cltk/stops))
- Make its formatting identical to `src/cltk/stops/lat.py` ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/lat.py))
- In `src/cltk/stops/words.py` add `cop` to the imports ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/words.py#L9))
- Add to dict [here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/words.py#L12)
- Add `StopsProcess` to the `CopticPipeline` ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/languages/pipelines.py#L167))
> Interesting find @AMR-KELEG . Are you interested in doing this one yourself? It should be pretty easy, plus you could learn about our upcoming release.
>
> Steps:
>
> * Make a new file at `dev/src/cltk/stops/.cop.py` ([here](https://github.com/cltk/cltk/tree/dev/src/cltk/stops))
> * Make its formatting identical to `src/cltk/stops/lat.py` ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/lat.py))
> * In `src/cltk/stops/words.py` add `cop` to the imports ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/words.py#L9))
> * Add to dict [here](https://github.com/cltk/cltk/blob/dev/src/cltk/stops/words.py#L12)
> * Add `StopsProcess` to the `CopticPipeline` ([here](https://github.com/cltk/cltk/blob/dev/src/cltk/languages/pipelines.py#L167))
Yes, I would love to do it myself.
Thanks for the detailed pointers.
I am just concerned about the precision of this list. Is there a way to double check if each entry is a stopword or not without having knowledge of coptic?
> I am just concerned about the precision of this list. Is there a way to double check if each entry is a stopword or not without having knowledge of coptic?
🤣
You're asking a fair question. I have found that it is best to code something and then ask for help.
Presumably the people who made this first list are not totally ignorant. To make them better, in the future:
- Ask Coptic scholars to look at them and validate for us
- Learn a little about the language ourselves. It does have some inflections so we need to make sure that the variants are in there: https://en.wikipedia.org/wiki/Coptic_language#Grammar
- There is a little bit of theory behind stopword lists -- some take a statistical approach and remove whatever is very frequent; others take a grammatical approach and remove words w/ low semantics like pronouns (he, you, they) and articles (the, a). For CLTK we have taken the latter approach.
@AMR-KELEG, if you're still interested, do you have a date by which you think you could finish this? Take as long as you want, but a soft deadline helps remind me to follow up.
If you have any issues w/ our new codebase (use the `dev` branch), please reach out or email me!
I am having problems with deadlines recently but I will make sure to work on this PR and provide frequent updates.
I tried installing the R library but I am getting errors with loading a file (I am not a fan of R but I will try to hack the scripts).
On the other hand, I found that the library makes use of the universal dependencies treebank (https://github.com/UniversalDependencies/UD_Coptic-Scriptorium) based on this paper (https://www.aclweb.org/anthology/W18-6022.pdf).
I am thinking of parsing the treebank files to extract tokens of closed class pos tags such as conjunctions (e.g: [conjunction token in coptic treebank](https://github.com/UniversalDependencies/UD_Coptic-Scriptorium/blob/master/cop_scriptorium-ud-dev.conllu#L11))
This would only work for separable tokens but for clitics such as the determiner article `ⲩ` in [determiner token in coptic treebank](https://github.com/UniversalDependencies/UD_Coptic-Scriptorium/blob/master/cop_scriptorium-ud-dev.conllu#L13-L16) , it won't be easy to classify these sub-tokens as stopwords without having a word segmentation model for Coptic in cltk.
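A rough sketch of that idea, scanning one of the linked CoNLL-U files and keeping tokens whose universal POS tag belongs to a closed class, could look like this (the tag set is an illustrative assumption, not anything that exists in CLTK):

```python
from collections import Counter

# Rough sketch: collect stopword candidates from a CoNLL-U treebank file by
# keeping tokens whose UPOS column belongs to a closed class.
CLOSED_CLASS_POS = {"ADP", "AUX", "CCONJ", "DET", "PART", "PRON", "SCONJ"}

def stopword_candidates(conllu_path):
    counts = Counter()
    with open(conllu_path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip comments and the blank lines between sentences
            cols = line.split("\t")
            if len(cols) != 10 or "-" in cols[0] or "." in cols[0]:
                continue  # skip multiword-token ranges and empty nodes
            form, upos = cols[1], cols[3]
            if upos in CLOSED_CLASS_POS:
                counts[form] += 1
    return counts

# e.g. stopword_candidates("cop_scriptorium-ud-dev.conllu").most_common(50)
```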
> On the other hand, I found that the library makes use of the universal dependencies treebank
> This would only work for separable tokens but for clitics such as the determiner article
> it won't be easy to classify these sub-tokens as stopwords without having a word segmentation mode
Your plan here might work, but I think it is preferable to find a stopwords list from another source and start there.
For splitting words, this is a separate process that could/should be taken care of by the [CopticStanzaProcess](https://github.com/cltk/cltk/blob/dev/src/cltk/dependency/processes.py#L179).
@AMR-KELEG How about you raise an issue on their repo? https://github.com/cinkova/stopwoRds You can reference this issue here and ask for a plaintext version their Coptic stopwords.
> @AMR-KELEG How about you raise an issue on their repo? https://github.com/cinkova/stopwoRds You can reference this issue here and ask for a plaintext version their Coptic stopwords.
Let's hope we will get a response soon :sweat_smile:
https://github.com/computationalstylistics/tidystopwords/issues/7
@AMR-KELEG Good work!
Did the developer email you the list? | 2021-02-13T11:31:07 |
|
cltk/cltk | 1,095 | cltk__cltk-1095 | [
"1051"
] | c37f79983b80ec22eef1d66704596376a6eca99a | diff --git a/src/cltk/lemmatize/grc.py b/src/cltk/lemmatize/grc.py
--- a/src/cltk/lemmatize/grc.py
+++ b/src/cltk/lemmatize/grc.py
@@ -55,13 +55,13 @@ def __init__(
def _randomize_data(train: List[list], seed: int):
import random
-
random.seed(seed)
random.shuffle(train)
- pos_train_sents = train[:4000]
+ train_size = int(.9 * len(train))
+ pos_train_sents = train[:train_size]
lem_train_sents = [[(item[0], item[1]) for item in sent] for sent in train]
- train_sents = lem_train_sents[:4000]
- test_sents = lem_train_sents[4000:5000]
+ train_sents = lem_train_sents[:train_size]
+ test_sents = lem_train_sents[train_size:]
return pos_train_sents, train_sents, test_sents
diff --git a/src/cltk/lemmatize/lat.py b/src/cltk/lemmatize/lat.py
--- a/src/cltk/lemmatize/lat.py
+++ b/src/cltk/lemmatize/lat.py
@@ -541,13 +541,13 @@ def __init__(
def _randomize_data(train: List[list], seed: int):
import random
-
random.seed(seed)
random.shuffle(train)
- pos_train_sents = train[:4000]
+ train_size = int(.9 * len(train))
+ pos_train_sents = train[:train_size]
lem_train_sents = [[(item[0], item[1]) for item in sent] for sent in train]
- train_sents = lem_train_sents[:4000]
- test_sents = lem_train_sents[4000:5000]
+ train_sents = lem_train_sents[:train_size]
+ test_sents = lem_train_sents[train_size:]
return pos_train_sents, train_sents, test_sents
| UnigramLemmatizer.choose_tag returns '', not None, for certain words, short-circuiting BackoffGreekLemmatizer
**Describe the bug**
The [backoff Greek lemmatizer](https://docs.cltk.org/en/stable/greek.html#lemmatization-backoff-method) is supposed to use the following chain of sub-lemmatizers ([ref](https://github.com/cltk/cltk/blob/de3f4fb72ed557101538e215b0e8096ad7ccb811/cltk/lemmatize/greek/backoff.py#L56-L61)):
1. `DictLemmatizer`
2. `UnigramLemmatizer`
3. `RegexpLemmatizer`
4. `DictLemmatizer`
5. `IdentityLemmatizer`
The `SequentialBackoffLemmatizer` superclass tries each sub-lemmatizer in turn, moving to the next whenever one returns `None` ([ref](https://github.com/cltk/cltk/blob/98a64abf428643186d9b21de675597ecc456d28d/src/cltk/lemmatize/backoff.py#L104-L105)). But certain words cause `UnigramLemmatizer` to return `''`, not `None`, which causes `SequentialBackoffLemmatizer` to return `''` as the lemma, without trying `RegexpLemmatizer` and the other following sub-lemmatizers. One such word is `'διοτρεφές'`.
```
>>> from cltk.lemmatize.greek.backoff import BackoffGreekLemmatizer
>>> from cltk.corpus.utils.formatter import cltk_normalize
>>> word = cltk_normalize('διοτρεφές')
>>> lemmatizer = BackoffGreekLemmatizer()
>>> lemmatizer.lemmatize([word])
[('διοτρεφές', '')]
```
By walking the chain of sub-lemmatizers manually, we find that it is `UnigramLemmatizer` (`backoff4`) that returns an empty string. If it returned `None`, then backoff would continue and eventually find a lemma with `backoff2`.
```
>>> lemmatizer.backoff5.choose_tag([word], 0, None)
>>> lemmatizer.backoff4.choose_tag([word], 0, None)
''
>>> lemmatizer.backoff3.choose_tag([word], 0, None)
>>> lemmatizer.backoff2.choose_tag([word], 0, None)
'διοτρεφής'
>>> lemmatizer.backoff1.choose_tag([word], 0, None)
'διοτρεφές'
>>> lemmatizer.backoff4
<UnigramLemmatizer: CLTK Sentence Training Data>
```
I cannot find a place in the inheritance chain of `UnigramLemmatizer` (through [`UnigramTagger`](https://github.com/nltk/nltk/blob/3.5/nltk/tag/sequential.py#L326), [`NgramTagger`](https://github.com/nltk/nltk/blob/3.5/nltk/tag/sequential.py#L259), and [`ContextTagger`](https://github.com/nltk/nltk/blob/3.5/nltk/tag/sequential.py#L108)) that explicitly returns an empty string, so I suppose it must be happening somewhere in the model.
**To Reproduce**
Steps to reproduce the behavior:
1. Install Python version 3.7.3.
2. Install CLTK version 0.1.121 with greek_models_cltk at commit [a68b983734d34df16fd49661f11c4ea037ab173a](https://github.com/cltk/grc_models_cltk/tree/a68b983734d34df16fd49661f11c4ea037ab173a).
```
python3 -m venv venv
source venv/bin/activate
pip3 install cltk
python3 -c "from cltk.corpus.utils.importer import CorpusImporter; CorpusImporter('greek').import_corpus('greek_models_cltk')"
```
3. In a script or REPL, run the following code:
```
>>> from cltk.lemmatize.greek.backoff import BackoffGreekLemmatizer
>>> from cltk.corpus.utils.formatter import cltk_normalize
>>> word = cltk_normalize('διοτρεφές')
>>> lemmatizer = BackoffGreekLemmatizer()
>>> lemmatizer.lemmatize([word])
```
4. See error:
```
[('διοτρεφές', '')]
```
**Expected behavior**
```
[('διοτρεφές', 'διοτρεφής')]
```
**Desktop (please complete the following information):**
Debian 10.7
**Additional context**
`UnigramLemmatizer` does not return `''` for all strings. For example, giving it a string of non-Greek text falls all the way through to `IdentityLemmatizer`, as expected.
```
>>> lemmatizer.lemmatize(['foo'])
[('foo', 'foo')]
```
This is how we are using the lemmatizer. We are inserting our own `DictLemmatizer` with corrections we have found in our corpus at the beginning of the backoff chain.
https://github.com/sasansom/sedes/blob/85ba9e2a2b5e9fbf52655368451b6057922582a6/src/lemma.py#L206-L216
| > so I suppose it must be happening somewhere in the model.
I found 57 entries in `UnigramLemmatizer._context_to_tag` (about 0.3% of 19459 entries total) that map to an empty string.
So it's looking like this issue may belong instead in https://github.com/cltk/grc_models_cltk. Should I copy it there, or is here the best place?
```
>>> from cltk.lemmatize.greek.backoff import BackoffGreekLemmatizer
>>> lemmatizer = BackoffGreekLemmatizer()
>>> for k in sorted([k for (k, v) in lemmatizer.backoff4._context_to_tag.items() if not v]): print(k)
20
;̓
·.
Αἰγούσσαις
Βαλιαρεῖς
Βριάρεων
Λυκουργίδας
Μέλητον
Μεργάνην
Νηληϊάδη
Πλάκῳ
Φθίης
α
αἱματη
β
βοῶπις
γαμψώνυχες
γεμόν
διοτρεφές
εὐ
εὐρυσθενές
εὐχερεί
ησυδουπιάπιτα
θαλείῃ
κεκλή
μελις
ξενοτί
ξψπρια
παθοῦς
πλεόνεσσιν
πρευμενεῖς
σβυτοδόκοι
στασινυς
τέξῃ
φέροιτο
φρ
ἀν
ἀνθεμίζομαι
ἀπε
ἀπρός
ἀρ
ἀῤ
ἀώρ
ἄ
ἄρκον
Ἀλευαδῶν
Ἀράξεω
Ἀρριβίου
ἐκικλή
ἐπάμιδα
ἐπελπομέν
ἑπτήρη
ἕπε
ἵ
Ἱππακρίτας
ὁμοί
Ὑκκάρου
```
Hi, Stephen. Thanks for all the detail and yes this is the right place to post. @diyclassics wrote this code and he's been pretty busy lately, so not sure the timeline of getting a fix in.
> I cannot find a place in the inheritance chain of UnigramLemmatizer (through UnigramTagger, NgramTagger, and ContextTagger) that explicitly returns an empty string, so I suppose it must be happening somewhere in the model.
[UnigramLemmatizer](https://github.com/cltk/cltk/blob/36a9feba26e71d5d311e34c009128e001feb2f5c/src/cltk/lemmatize/backoff.py#L209) yeah I skimmed a bit … there is a lot of inheritance going on here! I have the feeling you're right about this coming from the model
> I found 57 entries in UnigramLemmatizer._context_to_tag (about 0.3% of 19459 entries total) that map to an empty string.
Ah, you seem to have found it. I commend your bravery! What do you recommend should be done? Is there a clean way to overwrite the contents of the model? I have never inspected their contents.
> We are inserting our own DictLemmatizer with corrections we have found in our corpus at the beginning of the backoff chain.
Generally speaking, this is just how I recommend end users massage the outputs of many of our tools. We know these statistical methods create known-bad results and fixing them at the tail end is easier and more consistent than trying to manipulate training data to *maybe* come to the right answer.
This is interesting. Sometime we'd be very interested to know more about your scansion work and how it differs from our approaches (we have some stuff for the Germanic languages and Latin).
I am not Stephen (that's @sasansom), but David Fifield, his collaborator on the Sedes project.
> What do you recommend should be done? Is there a clean way to overwrite the contents of the model?
I was worried that the lemmatization failures might be something systemic—a failure to handle certain suffixes, for example. But that seems not to be the case. As long as it's just a small number of random words, it's something we can work around.
The training data for `UnigramLemmatizer` come from the file [greek_lemmatized_sents.pickle](https://github.com/cltk/grc_models_cltk/blob/master/lemmata/backoff/greek_lemmatized_sents.pickle) ([ref](https://github.com/cltk/cltk/blob/de3f4fb72ed557101538e215b0e8096ad7ccb811/cltk/lemmatize/greek/backoff.py#L29)). It appears the be a list of known lemmatization pairs, with context:
```
>>> with open(os.path.expanduser("~/cltk_data/greek/model/greek_models_cltk/lemmata/backoff/greek_lemmatized_sents.pickle"), "rb") as f:
... model = pickle.load(f)
>>> for entry in model[:4]: print(entry)
[('Κάδμου', 'Κάδμος'), ('πολῖται', 'πολίτης'), (',', ','), ('χρὴ', 'χρή'), ...]
[('εἰ', 'εἰ'), ('μὲν', 'μέν'), ('γὰρ', 'γάρ'), ('εὖ', 'εὖ'), ('πράξαιμεν', 'πράσσω'), ...]
[('εἰ', 'εἰ'), ('δ̓', 'δέ'), ('αὖθ̓', 'αὖτε'), (',', ','), ('ὃ', 'ὅς'), ('μὴ', 'μή'), ...]
[('ὑμᾶς', 'σύ'), ('δὲ', 'δέ'), ('χρὴ', 'χρή'), ('νῦν', 'νῦν'), (',', ','), ...]
```
I believe this list is then passed through [`ContextTagger._train`](https://github.com/nltk/nltk/blob/3.5/nltk/tag/sequential.py#L154), which counts frequencies and builds a map of words to their most frequent lemma.
Curiously, although there are 2 occurrences of `('διοτρεφές', '')` in the model, there are 3 of `('διοτρεφές', 'Διοτρεφής')` and 16 of `('διοτρεφές', 'διοτρεφής')`:
```
>>> collections.Counter((word, lemma) for line in model for word, lemma in line if word == "διοτρεφές")
Counter({('διοτρεφές', 'διοτρεφής'): 16, ('διοτρεφές', 'Διοτρεφής'): 3, ('διοτρεφές', ''): 2})
```
We would therefore expect the training process to arrive at `διοτρεφής` as the most frequent lemma of `διοτρεφές`. What I think is happening is the [pre-training subsetting step](https://github.com/cltk/cltk/blob/de3f4fb72ed557101538e215b0e8096ad7ccb811/cltk/lemmatize/greek/backoff.py#L40-L47) in `BackoffGreekLemmatizer` is just getting unlucky for this word and throwing away its most common lemmata.
So one way to deal with this, in the absence of more detailed information about where the greek_lemmatized_sents.pickle model comes from and how it came to contain empty strings, would be to have [`UnigramLemmatizer.__init__`](https://github.com/cltk/cltk/blob/98a64abf428643186d9b21de675597ecc456d28d/src/cltk/lemmatize/backoff.py#L214) filter out obviously bogus lemmata like `''` from its `train` argument.
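For illustration, that filtering could be as small as something like this (a rough sketch of the idea, not a proposal for the exact CLTK API):

```python
# Rough sketch: drop (token, lemma) pairs whose lemma is empty before the
# sentences are handed to UnigramLemmatizer as its ``train`` argument.
def drop_blank_lemmata(train_sents):
    cleaned = []
    for sent in train_sents:
        kept = [pair for pair in sent if pair[1].strip()]
        if kept:
            cleaned.append(kept)
    return cleaned
```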
Another way may be to remove the subsetting step from `BackoffGreekLemmatizer`, and hope that the numbers work out to settle on the correct lemmata. The subsetting, if I am reading it correctly, retains only 4000 word–lemma pairs, out of 549906 available. (It at first appears that the subsetting is randomized, however the randomization uses a [fixed seed](https://github.com/cltk/cltk/blob/de3f4fb72ed557101538e215b0e8096ad7ccb811/cltk/lemmatize/greek/backoff.py#L23), so it is actually deterministic.) But I would expect the removal of subsetting to have a large and unknown effect on the output lemmatization, as `UnigramLemmatizer` takes over a larger fraction of the input.
```
>>> sum(len(line) for line in model)
549906
```
Ah, David, my mistake, sorry. Really appreciate how carefully you traced this backwards.
> have UnigramLemmatizer.__init__ filter out obviously bogus lemmata like '' from its train argument
> Another way may be to remove the subsetting step from BackoffGreekLemmatizer
Am fine with either, depends on whether Patrick wants to retrain or not. He is looking at this now.
> So it's looking like this issue may belong instead in https://github.com/cltk/grc_models_cltk. Should I copy it there, or is here the best place?
Thank you for catching this—working on it now. You raise a number of good points that I am looking into.
First...
- I think you should copy over the issue of blank (i.e. '') lemmas to grc_models_cltk. I will update the token-lemma pair sentences; for now, I will just remove pairs with missing lemmas. (I will then turn some attention to putting together an updated list of sentences and releasing a new version of this lemmatizer.)
I will also make two updates to the lemmatizer code itself...
1. Look into adding a check for blank ('') lemmas to UnigramLemmatizer
2. Change the value in, as you refer to it, the subsetting step from 4000 (sentences, I believe—I will check) to 90% of the sents. (Also, when I have a chance to update the sentences, I can create an official train-test split in the models repo.)
Again, thank you for bringing these to our attention—looking forward to working with you all more on improving these tools/models.
@kylepjohnson Placeholder issue/comment—1. how should we version different tools/models within the language-specific model repos (like e.g. grc_models_cltk)?; 2. how should we coordinate versioning of the main repo with versioning of the corpora, models, etc.?
> 1. how should we version different tools/models within the language-specific model repos (like e.g. grc_models_cltk
Everything is in Git, so while not convenient, it does have an archive of all versions.
> 2. how should we coordinate versioning of the main repo with versioning of the corpora, models, etc.?
You could also consider tagging a branch w/ its corresponding cltk version number. However this is not convenient for you or the users. ...
Question @diyclassics will your fix be solely of the model? or will this involve a code fix, too? If both, then if you merge the pull requests at the same (models repo and this repo) and also push to pypi -- well, this will be in synch enough for me.
Am I following your question, Patrick?
Thinking aloud about two related but separate things—1. I am updating only *part* of the models repo, but updating the version of the *whole* models repo (whether in a git commit number or tag, etc.); making sure this is the best way to handle incremental changes to the datasets, models, etc.; 2. when the main repo refers to one of the corpus/model repos (e.g. with the default Backoff lemmatizers, or the corpus readers, etc.), is there any indication of versioning for the user, e.g. when the tool is called? (This is what I meant by 'coordinate'; didn't know what word fit best.) Just trying to anticipate problems that may arise from fixing the model, the code, or both.
> is there any indication of versioning for the user, e.g. w
I think you're asking whether we have *model versioning* to sync up w/ the *software versioning* of the core code. The answer is NO, only that we keep the models dir up-to-date w/ whatever the latest prod version is. Yes, this is definitely not ideal … there's a cost (measured in time) to do such model versioning that we haven't been willing to pay … absolutely, it's worth figuring out.
A cheap trick could be to put a model version into the model's filename (`lat_lemmatizer_v0.0.127.pickle`) and then somehow hardcode this into the code. The burden shifts from the users to us (nothing wrong with that, just not sure we have the bandwidth to deal with the upkeep and breakage).
Heh look at me, now I'm the one thinking aloud.
FYI https://github.com/cltk/cltk/issues/988#issuecomment-787486041
@whoopsedesy @diyclassics Is there an update on this? I don't want the ticket to linger, especially the error results from an implementation choice. But of course, we very much want the patch if this will affect other users in the future. | 2021-04-14T01:08:04 |
|
cltk/cltk | 1,116 | cltk__cltk-1116 | [
"1113"
] | 5dbfcf6fccade146d322cae036b35533aec59286 | diff --git a/src/cltk/lexicon/lat.py b/src/cltk/lexicon/lat.py
--- a/src/cltk/lexicon/lat.py
+++ b/src/cltk/lexicon/lat.py
@@ -47,6 +47,8 @@ def lookup(self, lemma: str) -> str:
>>> lll = LatinLewisLexicon()
>>> lll.lookup("clemens")[:50]
'clēmēns entis (abl. -tī; rarely -te, L.), adj. wit'
+ >>> all(word in lll.lookup("levis") for word in ["levis","lēvis"]) # Test for concatenated entries
+ True
>>> lll.lookup("omnia")
''
>>> lll.lookup(".")
@@ -55,6 +57,8 @@ def lookup(self, lemma: str) -> str:
''
>>> lll.lookup("175.")
''
+ >>> lll.lookup("(") # Test for regex special character
+ ''
"""
if not self.entries:
raise CLTKException(
@@ -64,7 +68,7 @@ def lookup(self, lemma: str) -> str:
if regex.match(r"^[0-9\.\?,\:;\!\<\>\-]*$", lemma) is not None:
return ""
- lemma = lemma.lower()
+ lemma = regex.escape(lemma.lower())
keys = self.entries.keys()
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
| Lexicon process for Latin fails on regex special characters
LatinLexiconProcess fails when regex special characters, e.g. single open parenthesis (i.e. ```(```) are included in tokenized input. Occurred while running MacOS 11.4; Python 3.9.5; CLTK 1.0.15; regex 2021.4.4 (but should fail in any case when this input is passed to the regex module). The solution is to escape the input ```lemma``` before running ```regex.match``` at https://github.com/cltk/cltk/blob/5dbfcf6fccade146d322cae036b35533aec59286/src/cltk/lexicon/lat.py#L70
I have written the patch and will make a PR soon.
Example and traceback:
```
from cltk import NLP
text = "Omnes igitur partes mundi (tangam autem maximas) calore fultae sustinentur." # Cic. Nat. D. 2.25
cltk_nlp = NLP(language="lat")
cltk_doc = cltk_nlp.analyze(text=text)
```
```
Traceback (most recent call last):
File "test.py", line 4, in <module>
cltk_doc = cltk_nlp.analyze(text=text)
File "[PATH]/lib/python3.9/site-packages/cltk/nlp.py", line 142, in analyze
doc = a_process.run(doc)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/processes.py", line 45, in run
word.definition = lookup_algo.lookup(word.lemma)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in lookup
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in <listcomp>
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 253, in match
pat = _compile(pattern, flags, ignore_unused, kwargs, True)
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 532, in _compile
raise error(caught_exception.msg, caught_exception.pattern,
regex._regex_core.error: missing ) at position 9
```
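For illustration, the escaping fix described above boils down to something like this (a rough sketch, not the literal patch):

```python
import regex

lemma = "(".lower()
# Unescaped, '(' opens an unterminated group and regex raises "missing )".
# Escaping the lemma first makes the lookup pattern safe:
pattern = rf"^{regex.escape(lemma)}[0-9]?$"
assert regex.match(pattern, "(") is not None
```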
| The solution is to refuse special characters for regular expressions in the `lemma` string. | 2021-07-03T17:12:10 |
|
cltk/cltk | 1,146 | cltk__cltk-1146 | [
"1145"
] | 1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1 | diff --git a/src/cltk/tokenizers/lat/lat.py b/src/cltk/tokenizers/lat/lat.py
--- a/src/cltk/tokenizers/lat/lat.py
+++ b/src/cltk/tokenizers/lat/lat.py
@@ -59,6 +59,9 @@ def tokenize(
>>> toker.tokenize('Cicero dixit orationem pro Sex. Roscio')
['Cicero', 'dixit', 'orationem', 'pro', 'Sex.', 'Roscio']
+ >>> toker.tokenize('nihilne te nocturnum praesidium Palati')
+ ['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
+
>>> toker.tokenize('Cenavin ego heri in navi in portu Persico?')
['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']
@@ -141,9 +144,7 @@ def replace(matching):
else:
specific_tokens += [token[: -len(enclitic)]] + ["est"]
else:
- specific_tokens += [token[: -len(enclitic)]] + [
- "-" + enclitic
- ]
+ specific_tokens += [token]
is_enclitic = True
break
if not is_enclitic:
| Hyphen added when separating 'ne' enclitics at sentence start
**Describe the bug**
Hyphen added when separating '-ne' enclitics at sentence start
**To Reproduce**
Python 3.9.9; CLTK 1.0.22
```
from cltk.tokenizers.lat.lat import LatinWordTokenizer

test = 'nihilne te nocturnum praesidium Palati'
t = LatinWordTokenizer()
print(t.tokenize(test))
```
Current output:
['nihil', '-', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
Expected output:
['nihil', '-ne', 'te', 'nocturnum', 'praesidium', 'Palati']
**Desktop (please complete the following information):**
- MacOS 12.1
**Additional context**
Issue is that there is a redundant hyphen being added in cases where '-ne' is separated as part of the first token in a sentence. I will submit a PR to fix this.
| 2022-01-11T21:13:05 |
||
cltk/cltk | 1,257 | cltk__cltk-1257 | [
"1250"
] | 8ea1b2656aa1c686d7fee3259c93a546b782a722 | diff --git a/src/cltk/dependency/processes.py b/src/cltk/dependency/processes.py
--- a/src/cltk/dependency/processes.py
+++ b/src/cltk/dependency/processes.py
@@ -320,7 +320,6 @@ def spacy_to_cltk_word_type(spacy_doc: spacy.tokens.doc.Doc):
]
cltk_word.features = MorphosyntacticFeatureBundle(*cltk_features)
cltk_word.category = to_categorial(cltk_word.pos)
- cltk_word.spacy_features = spacy_word.morph
sent_words[cltk_word.index_token] = cltk_word
words_list.append(cltk_word)
return words_list
| Cannot pickle CLTK Doc containing certain data types from Spacy
**Is your feature request related to a problem? Please describe.**
I'm always frustrated when my program crashes and I lose all of the `Doc`s I generated.
**Describe the solution you'd like**
I would like to be able to serialize (or somehow save) `Doc`s.
| Are you familiar with storing results as a pickle file? https://realpython.com/lessons/using-pickle-module/
When I am processing many documents with the cltk, I try to run them one-by-one and save that doc to disk.
Feel free to send a code snippet of what you’re trying to do.
@kylepjohnson I actually tried exactly what you suggest. Here's a snippet that illustrates my problem (let me know if you would like more of my code):
```
# downloaded files of lat_text_latin_library to corpus_files above
import pickle
from cltk import NLP

cltk_nlp = NLP(language="lat", suppress_banner=True)
with open(corpus_files[i], "rt", encoding="utf8") as file:
    cltk_doc = cltk_nlp.analyze(text=file.read())
pickled_cltk_doc = pickle.dumps(cltk_doc)
b = pickle.loads(pickled_cltk_doc)
```
When I tried to do this I tried to dump `pickled_cltk_doc` directly do its own file, but I get the same error here, so the example suffices:
```
pickled_cltk_doc = pickle.dumps(cltk_doc)
^^^^^^^^^^^^^^^^^^^^^^
File "stringsource", line 2, in spacy.tokens.morphanalysis.MorphAnalysis.__reduce_cython__
TypeError: self.c cannot be converted to a Python object for pickling
```
I just assumed that there was something about `Doc`s that wasn't serializable. Is this supposed to work? Am I doing something wrong?
Ahh, this is a new one. I can confirm that I get the same error (it comes from spacy): https://github.com/explosion/spaCy/issues/7644
``` python
>>> pickle.dumps(doc)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "stringsource", line 2, in spacy.tokens.morphanalysis.MorphAnalysis.__reduce_cython__
TypeError: self.c cannot be converted to a Python object for pickling
```
@clemsciences have you ever encountered this with Python?
@mwitz8 if this is a deal-breaker for you, you can use the Stanza model (which is still quite good):
``` python
>>> nlp.pipeline.processes[1]
<class 'cltk.dependency.processes.LatinSpacyProcess'>
>>> from cltk.dependency.processes import LatinStanzaProcess
>>> nlp.pipeline.processes[1] = LatinStanzaProcess
>>> nepos = "Idem cum prope ad annum octogesimum prospera pervenisset fortuna, extremis temporibus magnum in odium pervenit suorum civium, primo quod cum Demade de urbe tradenda Antipatro consenserat eiusque consilio Demosthenes cum ceteris, qui bene de re publica meriti existimabantur, populi scito in exsilium erant expulsi."
>>> doc = nlp.analyze(nepos)
>>> pickle.dumps(doc) # works ok
```
> @mwitz8 if this is a deal-breaker for you, you can use the Stanza model (which is still quite good):
The Stanza model works for what I need. Thanks!
Thank you for bringing this to our attention. Someone will need to dig into this. Likely, the answer will be to transform native Spacy data (regarding only morphology, I think) into strings, before writing its data to the CLTK's `Doc` type. I will leave this issue open to remind the world that we need to fix this sooner than later.
I'm going to work on this issue this week.
Thank you, Clément. This is a serious bug for many users (I always write my processed Docs as pickles.)
My first investigation is showing that the attribute `spacy_features` of `Word` (coming from the attribute `morph` of `spacy.tokens.Token`) added in the `SpacyProcess` is not serializable. This attribute has two methods that can be used to serialize it: `to_json` and `to_dict`.
Overriding `__getstate__` and `__setstate__` may fix the problem (see https://docs.python.org/3/library/pickle.html#pickling-class-instances).
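For illustration, those hooks could look roughly like this on a stand-in class (a sketch only; the real CLTK `Word` has many more fields, and whether to keep a plain-dict copy of the morphology or drop it entirely is an open choice):

```python
# Hypothetical stand-in for the CLTK Word class, showing the pickle hooks.
class Word:
    def __init__(self, string, features=None, spacy_features=None):
        self.string = string
        self.features = features              # picklable CLTK feature bundle
        self.spacy_features = spacy_features  # spaCy MorphAnalysis (not picklable)

    def __getstate__(self):
        state = self.__dict__.copy()
        morph = state.pop("spacy_features", None)
        # Keep only a plain-dict view of the spaCy morphology (or None).
        state["spacy_features"] = morph.to_dict() if morph is not None else None
        return state

    def __setstate__(self, state):
        # After unpickling, ``spacy_features`` is a plain dict, not a MorphAnalysis.
        self.__dict__.update(state)
```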
A `Token` is explicitly expected not to be serialized.

However, `Token.morph`, which is a [`MorphAnalysis`](https://spacy.io/api/morphology#morphanalysis), fails to be serialized.

It must be said that it has a `vocab` attribute that should not be serialized. It also has a [`to_dict`](https://spacy.io/api/morphology#morphanalysis-to_dict) method that helps serialize, but I'm not sure how to deserialize it.
Should I open a new issue on https://github.com/explosion/spaCy?
What if you try `del token.morph` or `setattr()`? We need this info while writing spacy’s data to our Doc, however once we have it we do not need to save that info anymore.
You could ask the project but it’s so big you might not get a response. It’s definitely a bug, I once saw an Issue about it.
In the `SpacyProcess`,
```python
def spacy_to_cltk_word_type(spacy_doc: spacy.tokens.doc.Doc):
"""...
"""
words_list: list[Word] = []
for sentence_index, sentence in enumerate(spacy_doc.doc.sents):
sent_words: dict[int, Word] = {}
for spacy_word in sentence:
pos: Optional[MorphosyntacticFeature] = None
if spacy_word.pos_:
pos = from_ud("POS", spacy_word.pos_)
cltk_word = Word(
# Note: In order to match how Stanza orders token output
# (index starting at 1, not 0), we must add an extra 1 to each
index_token=spacy_word.i + 1,
index_char_start=spacy_word.idx,
index_char_stop=spacy_word.idx + len(spacy_word),
index_sentence=sentence_index,
string=spacy_word.text, # same as ``token.text``
pos=pos,
xpos=spacy_word.tag_,
upos=spacy_word.pos_,
lemma=spacy_word.lemma_,
dependency_relation=spacy_word.dep_, # str
stop=spacy_word.is_stop,
# Note: Must increment this, too
governor=spacy_word.head.i + 1, # TODO: Confirm this is the index
)
raw_features: list[tuple[str, str]] = (
[
(feature, value)
for feature, value in spacy_word.morph.to_dict().items()
]
if spacy_word.morph
else []
)
cltk_features = [
from_ud(feature_name, feature_value)
for feature_name, feature_value in raw_features
]
cltk_word.features = MorphosyntacticFeatureBundle(*cltk_features)
cltk_word.category = to_categorial(cltk_word.pos)
cltk_word.spacy_features = spacy_word.morph
sent_words[cltk_word.index_token] = cltk_word
words_list.append(cltk_word)
return words_list
```
`cltk_word.features` already has all the info contained in `spacy_word.morph`. I suggest that we remove `cltk_word.spacy_features` because it's redundant with `cltk_word.features`. | 2024-05-01T22:32:34 |
|
pypa/cibuildwheel | 32 | pypa__cibuildwheel-32 | [
"7"
] | 0facf0ae1205a95cedc20cdb9e59a1b5e081a0c2 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -19,10 +19,14 @@ def build(project_dir, package_name, output_dir, test_command, test_requires, be
PythonConfiguration(version='3.6', identifier='cp36-macosx_10_6_intel', url='https://www.python.org/ftp/python/3.6.0/python-3.6.0-macosx10.6.pkg'),
]
- def shell(args, env=None, cwd=None):
+ def call(args, env=None, cwd=None, shell=False):
# print the command executing for the logs
- print('+ ' + ' '.join(shlex_quote(a) for a in args))
- return subprocess.check_call(args, env=env, cwd=cwd)
+ if shell:
+ print('+ %s' % args)
+ else:
+ print('+ ' + ' '.join(shlex_quote(a) for a in args))
+
+ return subprocess.check_call(args, env=env, cwd=cwd, shell=shell)
for config in python_configurations:
if skip(config.identifier):
@@ -30,9 +34,9 @@ def shell(args, env=None, cwd=None):
continue
# download the pkg
- shell(['curl', '-L', '-o', '/tmp/Python.pkg', config.url])
+ call(['curl', '-L', '-o', '/tmp/Python.pkg', config.url])
# install
- shell(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])
+ call(['sudo', 'installer', '-pkg', '/tmp/Python.pkg', '-target', '/'])
env = os.environ.copy()
env['PATH'] = os.pathsep.join([
@@ -44,14 +48,14 @@ def shell(args, env=None, cwd=None):
pip = 'pip3' if config.version[0] == '3' else 'pip2'
# check what version we're on
- shell(['which', python], env=env)
- shell([python, '--version'], env=env)
+ call(['which', python], env=env)
+ call([python, '--version'], env=env)
# install pip & wheel
- shell([python, '-m', 'ensurepip', '--upgrade'], env=env)
- shell([pip, '--version'], env=env)
- shell([pip, 'install', 'wheel'], env=env)
- shell([pip, 'install', 'delocate'], env=env)
+ call([python, '-m', 'ensurepip', '--upgrade'], env=env)
+ call([pip, '--version'], env=env)
+ call([pip, 'install', 'wheel'], env=env)
+ call([pip, 'install', 'delocate'], env=env)
# setup dirs
if os.path.exists('/tmp/built_wheel'):
@@ -64,10 +68,10 @@ def shell(args, env=None, cwd=None):
# run the before_build command
if before_build:
before_build_prepared = prepare_command(before_build, python=python, pip=pip)
- shell(shlex.split(before_build_prepared), env=env)
+ call(before_build_prepared, env=env, shell=True)
# build the wheel
- shell([pip, 'wheel', project_dir, '-w', '/tmp/built_wheel', '--no-deps'], env=env)
+ call([pip, 'wheel', project_dir, '-w', '/tmp/built_wheel', '--no-deps'], env=env)
built_wheel = glob('/tmp/built_wheel/*.whl')[0]
if built_wheel.endswith('none-any.whl'):
@@ -75,24 +79,24 @@ def shell(args, env=None, cwd=None):
shutil.move(built_wheel, '/tmp/delocated_wheel')
else:
# list the dependencies
- shell(['delocate-listdeps', built_wheel], env=env)
+ call(['delocate-listdeps', built_wheel], env=env)
# rebuild the wheel with shared libraries included and place in output dir
- shell(['delocate-wheel', '-w', '/tmp/delocated_wheel', built_wheel], env=env)
+ call(['delocate-wheel', '-w', '/tmp/delocated_wheel', built_wheel], env=env)
delocated_wheel = glob('/tmp/delocated_wheel/*.whl')[0]
# install the wheel
- shell([pip, 'install', delocated_wheel], env=env)
+ call([pip, 'install', delocated_wheel], env=env)
# test the wheel
if test_requires:
- shell([pip, 'install'] + test_requires, env=env)
+ call([pip, 'install'] + test_requires, env=env)
if test_command:
# run the tests from $HOME, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
abs_project_dir = os.path.abspath(project_dir)
test_command_absolute = test_command.format(project=abs_project_dir)
- shell(shlex.split(test_command_absolute), cwd=os.environ['HOME'], env=env)
+ call(shlex.split(test_command_absolute), cwd=os.environ['HOME'], env=env)
# we're all done here; move it to output
shutil.move(delocated_wheel, output_dir)
| diff --git a/test/03_before_build/environment.json b/test/03_before_build/environment.json
--- a/test/03_before_build/environment.json
+++ b/test/03_before_build/environment.json
@@ -1,4 +1,4 @@
{
- "CIBW_BEFORE_BUILD": "{python} -c \"import sys; open('/tmp/pythonversion.txt', 'w').write(sys.version)\"",
- "CIBW_BEFORE_BUILD_WINDOWS": "{python} -c \"import sys; open('c:\\pythonversion.txt', 'w').write(sys.version)\""
+ "CIBW_BEFORE_BUILD": "{python} -c \"import sys; open('/tmp/pythonversion.txt', 'w').write(sys.version)\" && {python} -c \"import sys; open('/tmp/pythonexecutable.txt', 'w').write(sys.executable)\"",
+ "CIBW_BEFORE_BUILD_WINDOWS": "{python} -c \"import sys; open('c:\\pythonversion.txt', 'w').write(sys.version)\" && {python} -c \"import sys; open('c:\\pythonexecutable.txt', 'w').write(sys.executable)\""
}
diff --git a/test/03_before_build/setup.py b/test/03_before_build/setup.py
--- a/test/03_before_build/setup.py
+++ b/test/03_before_build/setup.py
@@ -1,8 +1,8 @@
from setuptools import setup, Extension
-import sys
+import sys, os
if sys.argv[-1] != '--name':
- # assert that the Python version as written to version.txt in the CIBW_BEFORE_BUILD step
+ # assert that the Python version as written to pythonversion.txt in the CIBW_BEFORE_BUILD step
# is the same one as is currently running.
version_file = 'c:\\pythonversion.txt' if sys.platform == 'win32' else '/tmp/pythonversion.txt'
with open(version_file) as f:
@@ -11,6 +11,15 @@
print('sys.version', sys.version)
assert stored_version == sys.version
+ # check that the executable also was written
+ executable_file = 'c:\\pythonexecutable.txt' if sys.platform == 'win32' else '/tmp/pythonexecutable.txt'
+ with open(executable_file) as f:
+ stored_executable = f.read()
+ print('stored_executable', stored_executable)
+ print('sys.executable', sys.executable)
+ # windows/mac are case insensitive
+ assert os.path.realpath(stored_executable).lower() == os.path.realpath(sys.executable).lower()
+
setup(
name="spam",
ext_modules=[Extension('spam', sources=['spam.c'])],
| Run shell commands in a system shell rather than directly
This would allow things like `CIBW_BEFORE_BUILD=command && command2`
| 2017-09-06T21:39:16 |
|
pypa/cibuildwheel | 76 | pypa__cibuildwheel-76 | [
"75"
] | 8800ea514b3c16b5a32d08bf958d483ad9ba4cbc | diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -40,6 +40,8 @@ def shell(args, env=None, cwd=None):
PythonConfiguration(version='3.5.x', arch="64", identifier='cp35-win_amd64', path='C:\Python35-x64'),
PythonConfiguration(version='3.6.x', arch="32", identifier='cp36-win32', path='C:\Python36'),
PythonConfiguration(version='3.6.x', arch="64", identifier='cp36-win_amd64', path='C:\Python36-x64'),
+ PythonConfiguration(version='3.7.x', arch="32", identifier='cp37-win32', path='C:\Python37'),
+ PythonConfiguration(version='3.7.x', arch="64", identifier='cp37-win_amd64', path='C:\Python37-x64'),
]
abs_project_dir = os.path.abspath(project_dir)
@@ -50,6 +52,10 @@ def shell(args, env=None, cwd=None):
if skip(config.identifier):
print('cibuildwheel: Skipping build %s' % config.identifier, file=sys.stderr)
continue
+
+ # check python & pip exist for this configuration
+ assert os.path.exists(os.path.join(config.path, 'python.exe'))
+ assert os.path.exists(os.path.join(config.path, 'Scripts', 'pip.exe'))
# setup dirs
if os.path.exists(built_wheel_dir):
| Windows wheels are not built for python 3.7
On Linux wheels are correctly built for all supported python versions. Windows however stops at python 3.6.
Looking at windows.py there seem to be no references to python 3.7
| 2018-06-30T15:46:59 |
||
pypa/cibuildwheel | 88 | pypa__cibuildwheel-88 | [
"87"
] | 332b8ec6acfaaa99659a3ecbce464168c9169086 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -108,9 +108,10 @@ def main():
print('cibuildwheel: Could not find setup.py at root of project', file=sys.stderr)
exit(2)
else:
+ print(err.output)
print('cibuildwheel: Failed to get name of the package. Command was %s' % err.cmd,
file=sys.stderr)
- exit(2)
+ exit(err.returncode)
if package_name == '' or package_name == 'UNKNOWN':
print('cibuildwheel: Invalid package name "%s". Check your setup.py' % package_name,
| 'wheel' missing from setup.py install_requires list?
While trying to use a vanilla cibuildwheel configuration with AppVeyor, I kept running into this error,
```
cibuildwheel: Failed to get name of the package. Command was ['c:\\python27\\python.exe', '.\\setup.py', '--name']
```
It was only after running that command (`python .\\setup.py --name`) ahead of the `cibuildwheel --output-dir wheelhouse` that I finally got a clue of what was going on,
```
setup requires that the Python package 'wheel' be installed. Try the command 'pip install wheel'.
```
To resolve this problem I've added `'wheel'` to [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12) in our fork.
I think that two changes are necessary:
1. On [line 111 of `__main__.py`](https://github.com/joerick/cibuildwheel/blob/master/cibuildwheel/__main__.py#L111), print the **`output`** of [CalledProcessError](https://docs.python.org/2/library/subprocess.html#subprocess.CalledProcessError), and exit with the value of **`returncode`**.
2. Add `'wheel'` to [setup.py `install_requires` list](https://github.com/joerick/cibuildwheel/blob/master/setup.py#L12)
Do you want a PR?
| Hey miguelsousa! Thanks for the debugging. Would love a PR for [1] above. Regarding [2], can you see where that error message is coming from? Perhaps there's something about your project's `setup.py` that requires it? | 2018-08-21T21:12:46 |
|
pypa/cibuildwheel | 129 | pypa__cibuildwheel-129 | [
"122"
] | 8523b25a18beb3e58399b9a17ca8fbdda21abdd9 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -86,8 +86,7 @@ def call(args, env=None, cwd=None, shell=False):
# install pip & wheel
call(['python', get_pip_script, '--no-setuptools', '--no-wheel'], env=env)
call(['pip', '--version'], env=env)
- # sudo required, because the removal of the old version of setuptools might cause problems with newer pip versions (see issue #122)
- call(['sudo', 'pip', 'install', '--upgrade', 'setuptools'], env=env)
+ call(['pip', 'install', '--upgrade', 'setuptools'], env=env)
call(['pip', 'install', 'wheel'], env=env)
call(['pip', 'install', 'delocate'], env=env)
| MacOS travis-ci build stalls
I noticed today that our MacOS travis-CI build using cibuildwheel has started stalling at the following point of the cibuildwheel setup:
```bash
+ pip install --upgrade setuptools
Collecting setuptools
Downloading https://files.pythonhosted.org/packages/37/06/754589caf971b0d2d48f151c2586f62902d93dc908e2fd9b9b9f6aa3c9dd/setuptools-40.6.3-py2.py3-none-any.whl (573kB)
Installing collected packages: setuptools
Found existing installation: setuptools 28.8.0
Uninstalling setuptools-28.8.0:
No output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself.
Check the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#Build-times-out-because-no-output-was-received
The build has been terminated
```
This hasn't affected our Windows/Linux builds, and no changes to our devops pipeline have occurred (and even then, only superficial Python commits in our codebase were committed).
This issue happens no matter how many times we restart the build, and seems odd - this step is usually instantaneous on previous Mac cibuildwheel builds.
Since this is a command that is called by cibuildwheel, has there been a recent change that breaks this step?
| I can reproduce this on my build as well: https://travis-ci.org/YannickJadoul/Parselmouth/jobs/483834720, but I have no clue what's going on.
Let's run a branch with verbosity flags for this command? Unless anyone has better suggestions?
Weird, Travis builds of cibuildwheel fail in the exact same way, and `-vvv` does not provide any more insight: https://travis-ci.org/YannickJadoul/cibuildwheel/jobs/483882146#L3949
The problem seems to be with Python 3.4, though, because the Python 2.7 does work. Is this the same for you, @josh146?
Installing `pip<19` seems to work, so I'm guessing it's a problem with pip and Python 3.4: https://github.com/YannickJadoul/cibuildwheel/commit/f876231da4adeca96636411da067fc234733a6f5 and https://travis-ci.org/YannickJadoul/cibuildwheel/builds/483890015
Not sure what we can do about it though. This fix seems very ad-hoc and ugly?
This seems to be the same/a similar issue: https://github.com/pypa/pip/issues/6169
It stalls too with Python3.5 https://travis-ci.org/cds-astro/cds-healpix-python/jobs/483904177 . Seems to be a problem with pip 19.0.1
I'm not using either Python 2.7 _or_ Python 3.4, but I see the issue on Python 3.5
Ah, weird that it did work with 2.7 in those builds, then; but thanks for the confirmation.
If you need a quick patch, https://github.com/YannickJadoul/cibuildwheel/commit/f876231da4adeca96636411da067fc234733a6f5 will fix it on macOS (though the title of the commit is not correct anymore, after amending the commit); but I assume a fix will be released soon enough by `pip`?
EDIT: Although, maybe it is an interesting option to provide a release with this small addition, since all builds using `cibuildwheel` are now failing. What do you think, @joerick?
From further reading, it seems like this bug occurs when upgrading a package that was previously installed via `sudo`, but is attempting to be upgraded without `sudo`. Previous versions of pip would silently fail. Maybe this is why it only affects the MacOS builds?
Ooh, nice insight @josh146.
Sorry I'm away on holiday so unable to help with this! If this is breaking builds now, I think a sensible approach would be a patch to install pip==18.1 for now, and then wait to see how the bug shakes out. Maybe we'll have to change which commands have sudo down the line, but let's wait and see. I think it was working before!
(Side note: I'm thinking about pinning all the versions of pip/setuptools/wheel to give better stability and build repeatability. We can upgrade them when we need features or to fix reported bugs. Thoughts?)
I noticed the same problem (pip freezing on upgrading without sudo) on Ubuntu PC (16.04), so it seems that missing sudo is the problem.
@matkoniecz I'm still failing to reproduce the problem manually; I'm only seeing it on the online build :-(
Is there anything in particular you do to trigger the problem on Ubuntu? Is `pip` installed with/without sudo, locally/globally? Can I do it for any package?
I've just tried installing `cibuildwheel<0.10` with sudo, then updating to the latest `cibuildwheel`, but that does not seem to trigger it on my Ubuntu 18.04 machine.
(Just wondering, because the issue on the `pip` repo does not seem to advance a lot. So I'm wondering if I could debug/help reporting more details there.)
I get the stall with `pip install --upgrade pillow`; later, `sudo pip install --upgrade pillow` worked fine.
I will check whether it is reproducible.
Thanks for that! When I do this, I'm getting nice errors, though:
```
~$ pip3 install --upgrade pillow
Collecting pillow
Using cached https://files.pythonhosted.org/packages/85/5e/e91792f198bbc5a0d7d3055ad552bc4062942d27eaf75c3e2783cf64eae5/Pillow-5.4.1-cp36-cp36m-manylinux1_x86_64.whl
Installing collected packages: pillow
Found existing installation: Pillow 5.3.0
Uninstalling Pillow-5.3.0:
Could not install packages due to an EnvironmentError: [Errno 13] Permission denied: 'ImageDraw.py'
Consider using the `--user` option or check the permissions.
```
I believe I've figured out the problem, indeed something with permissions: see https://github.com/pypa/pip/issues/6169
I think this means we should just use `sudo`, in our case, but I'll have a closer look tomorrow.
Meanwhile, I'd suggest using `pip install git+https://github.com/YannickJadoul/cibuildwheel.git@pip-19-stalling-workaround` (to install this branch using pip 18: https://github.com/YannickJadoul/cibuildwheel/tree/debug-issue-122)
@joerick Enjoy your holiday! I don't know if I have a strong opinion about pinning them. In general I'd say we haven't had a lot of problems with these packages (and if we do, the rest of the world also has them), and maybe we'd like to be in on new `pip` features without extra work/releases on `cibuildwheel`?
@YannickJadoul Thanks for checking this! Let me know if further testing would be useful.
@YannickJadoul amazing! I had stumbled on another GitHub issue that mentioned it was something to do with permissions, but for the life of me couldn't find it again - hence why my second comment was so vague.
@josh146 Actually, your comment was the start of my debugging. Otherwise I'd have had absolutely no clue where to start. So thanks for that :-)
And in the end, it seems that adding `sudo` will indeed be the solution, so you were spot on.
@matkoniecz Thanks for that! I'll check this hypotheses tomorrow (this should be testable by changing permissions to the `site-packages` folder) and I'll let you know if I don't manage to reproduce the issue.
Update: a solution is on its way: https://github.com/pypa/pip/pull/6215
I think I will wait to see the exact outcome of that to see what we should do about the issue.
Adding `sudo` or `--user` to `pip install` invocations will work, but since it worked without those before, we might want to see if it will work like that after the `pip` fix?
Temporary fix in #123, releasing now...
Released as 0.10.1 | 2019-03-10T14:36:34 |
|
pypa/cibuildwheel | 194 | pypa__cibuildwheel-194 | [
"159"
] | c22d50427c4610eed6fe6313555f33d1797d8787 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -77,12 +77,12 @@ def main():
if args.platform != 'auto':
platform = args.platform
else:
- ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ
+ ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ or 'GITHUB_WORKFLOW' in os.environ
if not ci:
print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '
- 'Travis CI, AppVeyor, Azure Pipelines and CircleCI are supported. You can run on your '
- 'development machine or other CI providers using the --platform argument. Check --help '
- 'output for more information.',
+ 'Travis CI, AppVeyor, Azure Pipelines, GitHub Actions and CircleCI are supported. You '
+ 'can run on your development machine or other CI providers using the --platform argument. '
+ 'Check --help output for more information.',
file=sys.stderr)
exit(2)
if sys.platform.startswith('linux'):
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,29 @@
+name: Test
+
+on: [push, pull_request]
+
+jobs:
+ test:
+ name: Test cibuildwheel on ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-18.04, windows-latest, macos-latest]
+ python_version: ['3.7']
+ steps:
+ - uses: actions/checkout@v1
+ - uses: actions/setup-python@v1
+ name: Install Python ${{ matrix.python_version }}
+ with:
+ python-version: ${{ matrix.python_version }}
+
+ - name: Install dependencies
+ run: |
+ python -m pip install -r requirements-dev.txt
+ - name: Install Visual C++ for Python 2.7
+ if: startsWith(matrix.os, 'windows')
+ run: |
+ choco install vcpython27 -f -y
+ - name: Test cibuildwheel
+ run: |
+ python ./bin/run_tests.py
diff --git a/unit_test/main_tests/main_platform_test.py b/unit_test/main_tests/main_platform_test.py
--- a/unit_test/main_tests/main_platform_test.py
+++ b/unit_test/main_tests/main_platform_test.py
@@ -11,6 +11,7 @@ def test_unknown_platform_non_ci(monkeypatch, capsys):
monkeypatch.delenv('CI', raising=False)
monkeypatch.delenv('BITRISE_BUILD_NUMBER', raising=False)
monkeypatch.delenv('AZURE_HTTP_USER_AGENT', raising=False)
+ monkeypatch.delenv('GITHUB_WORKFLOW', raising=False)
with pytest.raises(SystemExit) as exit:
main()
| Support for GitHub actions
https://github.com/features/actions
(which is still in closed beta, but filing this anyhow since hopefully it's in the cards?).
| Yep, this is very interesting for us, since it's Linux/mac/windows and right here on github. Investigations and PRs welcome! | 2019-11-05T22:47:33 |
pypa/cibuildwheel | 199 | pypa__cibuildwheel-199 | [
"198"
] | 0375e92110113689025a5f05376f77d220d8b5c9 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,8 @@
package_data={
'cibuildwheel': ['resources/*'],
},
+ # Supported python versions
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
keywords='ci wheel packaging pypi travis appveyor macos linux windows',
classifiers=[
'Intended Audience :: Developers',
| diff --git a/unit_test/environment_test.py b/unit_test/environment_test.py
--- a/unit_test/environment_test.py
+++ b/unit_test/environment_test.py
@@ -1,3 +1,4 @@
+import os
from cibuildwheel.environment import parse_environment
@@ -37,12 +38,15 @@ def test_inheritance():
def test_shell_eval():
environment_recipe = parse_environment('VAR="$(echo "a test" string)"')
+ env_copy = os.environ.copy()
+ env_copy.pop('VAR', None)
+
environment_dict = environment_recipe.as_dictionary(
- prev_environment={}
+ prev_environment=env_copy
)
environment_cmds = environment_recipe.as_shell_commands()
- assert environment_dict == {'VAR': 'a test string'}
+ assert environment_dict['VAR'] == 'a test string'
assert environment_cmds == ['export VAR="$(echo "a test" string)"']
def test_shell_eval_and_env():
| cibuildwheel CI tests failing on Azure for windows
`cibuildwheel` CI tests which are using the sample configuration in README are failing on Windows following Azure update to support python 3.8
Given the number of CI providers now tested, I guess we can try to test `cibuildwheel` on python 2.7, 3.5, 3.6, 3.7 and 3.8 without too much overhead on test time by dispatching the python versions running `cibuildwheel` across CI providers.
| 2019-11-09T10:33:37 |
|
pypa/cibuildwheel | 204 | pypa__cibuildwheel-204 | [
"146"
] | a3e9b7b8ec81e4091e5b81d125201cba47a25d1c | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -24,6 +24,12 @@ def get_option_from_environment(option_name, platform=None, default=None):
return os.environ.get(option_name, default)
+def strtobool(val):
+ if val.lower() in ('y', 'yes', 't', 'true', 'on', '1'):
+ return True
+ return False
+
+
def main():
parser = argparse.ArgumentParser(
description='Build wheels for all the platforms.',
@@ -38,7 +44,7 @@ def main():
'script is going to automatically install MacPython on your system, '
'so don\'t run on your development machine. For "windows", you need to '
'run in Windows, and it will build and test for all versions of '
- 'Python at C:\\PythonXX[-x64]. Default: auto.'))
+ 'Python. Default: auto.'))
parser.add_argument('--output-dir',
default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),
help='Destination folder for the wheels.')
@@ -59,36 +65,19 @@ def main():
if args.platform != 'auto':
platform = args.platform
else:
- platform = None
-
- if os.environ.get('TRAVIS_OS_NAME') == 'linux':
- platform = 'linux'
- elif os.environ.get('TRAVIS_OS_NAME') == 'osx':
- platform = 'macos'
- elif os.environ.get('TRAVIS_OS_NAME') == 'windows':
- platform = 'windows'
- elif 'APPVEYOR' in os.environ:
- platform = 'windows'
- elif 'BITRISE_BUILD_NUMBER' in os.environ:
- platform = 'macos'
- elif os.environ.get('CIRCLECI'):
+ ci = strtobool(os.environ.get('CI', 'false')) or 'BITRISE_BUILD_NUMBER' in os.environ or 'AZURE_HTTP_USER_AGENT' in os.environ
+ if ci:
if sys.platform.startswith('linux'):
platform = 'linux'
- elif sys.platform.startswith('darwin'):
+ elif sys.platform == 'darwin':
platform = 'macos'
- elif 'AZURE_HTTP_USER_AGENT' in os.environ:
- if os.environ['AGENT_OS'] == 'Linux':
- platform = 'linux'
- elif os.environ['AGENT_OS'] == 'Darwin':
- platform = 'macos'
- elif os.environ['AGENT_OS'] == 'Windows_NT':
+ elif sys.platform == 'win32':
platform = 'windows'
-
if platform is None:
print('cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server, '
- 'Travis CI, AppVeyor, and CircleCI are supported. You can run on your development '
- 'machine using the --platform argument. Check --help output for more '
- 'information.',
+ 'Travis CI, AppVeyor, Azure Pipelines and CircleCI are supported. You can run on your '
+ 'development machine or other CI providers using the --platform argument. Check --help '
+ 'output for more information.',
file=sys.stderr)
exit(2)
| Linux support in AppVeyor
FYI
https://www.appveyor.com/blog/2018/03/06/appveyor-for-linux/
https://www.appveyor.com/blog/2018/05/15/appveyor-for-linux-is-generally-available/
https://www.appveyor.com/docs/getting-started-with-appveyor-for-linux/
| 2019-11-10T17:54:42 |
||
pypa/cibuildwheel | 263 | pypa__cibuildwheel-263 | [
"139"
] | 8236877f02ad8ad13c2ebeba3161ab473aacff4d | diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -1,5 +1,5 @@
from __future__ import print_function
-import os, subprocess, sys, uuid
+import os, subprocess, sys, uuid, textwrap
from collections import namedtuple
from .util import prepare_command, get_build_verbosity_extra_flags
@@ -173,8 +173,36 @@ def run_docker(command, stdin_str=None):
run_docker(['cp', os.path.abspath(project_dir) + '/.', container_name + ':/project'])
run_docker(['start', '-i', '-a', container_name], stdin_str=bash_script)
run_docker(['cp', container_name + ':/output/.', os.path.abspath(output_dir)])
- except subprocess.CalledProcessError:
+ except subprocess.CalledProcessError as error:
+ troubleshoot(project_dir, error)
exit(1)
finally:
# Still gets executed, even when 'exit(1)' gets called
run_docker(['rm', '--force', '-v', container_name])
+
+
+def troubleshoot(project_dir, error):
+ if (isinstance(error, subprocess.CalledProcessError) and 'start' in error.cmd):
+ # the bash script failed
+ print('Checking for common errors...')
+ so_files = []
+ for root, dirs, files in os.walk(project_dir):
+ for name in files:
+ _, ext = os.path.splitext(name)
+ if ext == '.so':
+ so_files.append(os.path.join(root, name))
+
+ if so_files:
+ print(textwrap.dedent('''
+ NOTE: Shared object (.so) files found in this project.
+
+ These files might be built against the wrong OS, causing problems with
+ auditwheel.
+
+ If you're using Cython and have previously done an in-place build,
+ remove those build files (*.so and *.c) before starting cibuildwheel.
+ '''))
+
+ print(' Files detected:')
+ print('\n'.join([' '+f for f in so_files]))
+ print('')
| remove .so files when copying source into the container
Trying to build wheels on travis and I'm getting the following error:
```
auditwheel: error: cannot repair "/tmp/built_wheel/pydantic-0.27a1-cp37-cp37m-linux_x86_64.whl" to "manylinux1_x86_64" ABI because of the presence of too-recent versioned symbols. You'll need to compile the wheel on an older toolchain.
```
See [this build](https://travis-ci.org/samuelcolvin/pydantic/jobs/537258176) for details.
I don't get this error when building locally with `cibuildwheel --platform linux --output-dir dist`.
Any idea what I'm doing wrong?
| Possibly the same as https://github.com/pypa/auditwheel/issues/36#issuecomment-217965612, since the first image build goes fine; it's the second that fails.
Sorted this at last; found the answer at https://github.com/pypa/manylinux/issues/214#issuecomment-409375930 - I was building the package on the host before running `cibuildwheel`.
`cibuildwheel` is copying the `.so` files along with the source into the container thus the problem.
`cibuildwheel` should exclude `.so` (and perhaps `.c`?) files when copying the source into the container.
Ah, thanks for tracking this down. Maybe a simple way around this would be to delete the 'build' directory inside the docker container before each build? Anything in that folder can be regenerated as part of the build, as I understand it.
That wouldn't help in my case; during testing I install/build with `pip install -e .`, which means lots of `*.so` files alongside `*.py` files.
Ideally `cibuildwheel` would either:
* delete all `.so` files in src before copying (with a warning)
* or better, filter out such files when copying with some kind of glob,
* or delete `.so` files inside the container before building
* or just fail with a clear error message if such files exist in the source directory
* or document this potential pitfall
Or some combination of the above. I'm afraid I can't be much help on this at the moment - I'm already spending far too much time on open source stuff, but thanks a lot for cibuildwheel, even with this problem it's saved me a lot of time.
> `cibuildwheel` is copying the `.so` files along with the source into the container thus the problem.
>
> `cibuildwheel` should exclude `.so` (and perhaps `.c`?) files when copying the source into the container.
Maybe I don't fully understand the problem, but this seems like a problem on the user-side of `cibuildwheel`? `cibuildwheel` just makes sure the builds inside the container have access to all files in the `project` folder passed at invocation. It feels rather dangerous to start making assumptions and exclude certain types of files, if you ask me.
Correct me if I'm wrong, but the actual problem seems to be that the project is built twice? Which is in principle not a property of `cibuildwheel`, but a consequence of how it's used in the CI config?
Is there a possibility to clean up the build? Or maybe to run `cibuildwheel` in a separate job, such that your builds do not conflict?
> Is there a possibility to clean up the build? Or maybe to run cibuildwheel in a separate job, such that your builds do not conflict?
Yes that's what I'm doing now.
Maybe just a warning then and a note in docs?
Yeah, sure, if this can be confusing, it should be clarified. I'm just not sure where to add it and how general it should be. What kind of note are you thinking about? Something related to this specific problem, or a more general note on `cibuildwheel` actually making a full independent build?
The question is what can cause the "presence of too-recent versioned symbols" error. Is it just `.so` dynamic library files, or some other extensions? I'm guessing 99.99% of the time it'll be `.so` files.
I would say:
* add to the docs, "make sure your source directory doesn't include any build artifacts like `*.c` or `*.so` files before running `cibuildwheel`"
* if there are any `*.so` files in the build directory issue a warning during build saying something like "Build artifacts (`*.so` files) were found in your source directory "/path/to/src", this may cause cibuildwheel builds to fail. Consider deleting these files before calling cibuildwheel" | 2020-02-02T14:35:45 |
|
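A minimal, hypothetical sketch of the pre-flight check suggested in the discussion above: scan the project for leftover in-place build artifacts on the host before invoking cibuildwheel. The `.c` heuristic is naive (it also flags hand-written C sources), so treat this purely as an illustration.
```python
import os
import sys


def find_build_artifacts(project_dir, extensions=('.so', '.c')):
    """Return paths that look like leftover in-place build artifacts."""
    found = []
    for root, _dirs, files in os.walk(project_dir):
        for name in files:
            if os.path.splitext(name)[1] in extensions:
                found.append(os.path.join(root, name))
    return found


if __name__ == '__main__':
    artifacts = find_build_artifacts(sys.argv[1] if len(sys.argv) > 1 else '.')
    if artifacts:
        print('Warning: build artifacts found; they may break auditwheel repairs:')
        print('\n'.join('  ' + path for path in artifacts))
```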
pypa/cibuildwheel | 264 | pypa__cibuildwheel-264 | [
"261"
] | 4f47ab8052b6c4dc8efeace296d180cbf2c016d2 | diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -63,17 +63,30 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
mkdir /output
cd /project
- {environment_exports}
+ for PYBIN in {pybin_paths}; do (
+ # Temporary hack/workaround, putting loop body in subshell; fixed in PR #256
+
+ export PATH="$PYBIN:$PATH"
+ {environment_exports}
+
+ # check the active python and pip are in PYBIN
+ if [ "$(which pip)" != "$PYBIN/pip" ]; then
+ echo "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it."
+ exit 1
+ fi
+ if [ "$(which python)" != "$PYBIN/python" ]; then
+ echo "cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it."
+ exit 1
+ fi
- for PYBIN in {pybin_paths}; do
if [ ! -z {before_build} ]; then
- PATH="$PYBIN:$PATH" sh -c {before_build}
+ sh -c {before_build}
fi
# Build the wheel
rm -rf /tmp/built_wheel
mkdir /tmp/built_wheel
- PATH="$PYBIN:$PATH" "$PYBIN/pip" wheel . -w /tmp/built_wheel --no-deps {build_verbosity_flag}
+ pip wheel . -w /tmp/built_wheel --no-deps {build_verbosity_flag}
built_wheel=(/tmp/built_wheel/*.whl)
# repair the wheel
@@ -92,9 +105,9 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
if [ ! -z {test_command} ]; then
# Set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
- "$PYBIN/pip" install virtualenv
+ pip install virtualenv
venv_dir=`mktemp -d`/venv
- "$PYBIN/python" -m virtualenv "$venv_dir"
+ python -m virtualenv "$venv_dir"
# run the tests in a subshell to keep that `activate`
# script from polluting the env
@@ -123,7 +136,7 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
)
# exit if tests failed (needed for older bash versions)
if [ $? -ne 0 ]; then
- exit 1;
+ exit 1;
fi
# clean up
@@ -133,7 +146,7 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
# we're all done here; move it to output
mv "${{repaired_wheels[@]}}" /output
for repaired_wheel in "${{repaired_wheels[@]}}"; do chown {uid}:{gid} "/output/$(basename "$repaired_wheel")"; done
- done
+ ) done
'''.format(
pybin_paths=' '.join(c.path + '/bin' for c in platform_configs),
test_requires=' '.join(test_requires),
diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -2,6 +2,7 @@
import shlex
import shutil
import subprocess
+import sys
import tempfile
from collections import namedtuple
from glob import glob
@@ -137,11 +138,20 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
# check what version we're on
call(['which', 'python'], env=env)
call(['python', '--version'], env=env)
+ which_python = subprocess.check_output(['which', 'python'], env=env, universal_newlines=True).strip()
+ if which_python != '/tmp/cibw_bin/python':
+ print("cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.", file=sys.stderr)
+ exit(1)
# install pip & wheel
call(['python', get_pip_script], env=env, cwd="/tmp")
assert os.path.exists(os.path.join(installation_bin_path, 'pip'))
+ call(['which', 'pip'], env=env)
call(['pip', '--version'], env=env)
+ which_pip = subprocess.check_output(['which', 'pip'], env=env, universal_newlines=True).strip()
+ if which_pip != '/tmp/cibw_bin/pip':
+ print("cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.", file=sys.stderr)
+ exit(1)
call(['pip', 'install', '--upgrade', 'setuptools', 'wheel', 'delocate'], env=env)
# setup target platform, only required for python 3.5
diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -1,6 +1,7 @@
import os
import shutil
import subprocess
+import sys
import tempfile
from collections import namedtuple
from glob import glob
@@ -141,11 +142,19 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
simple_shell(['where', 'python'], env=env)
simple_shell(['python', '--version'], env=env)
simple_shell(['python', '-c', '"import struct; print(struct.calcsize(\'P\') * 8)"'], env=env)
+ where_python = subprocess.check_output(['where', 'python'], env=env, universal_newlines=True).splitlines()[0].strip()
+ if where_python != os.path.join(installation_path, 'python.exe'):
+ print("cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.", file=sys.stderr)
+ exit(1)
# make sure pip is installed
if not os.path.exists(os.path.join(installation_path, 'Scripts', 'pip.exe')):
simple_shell(['python', get_pip_script], env=env, cwd="C:\\cibw")
assert os.path.exists(os.path.join(installation_path, 'Scripts', 'pip.exe'))
+ where_pip = subprocess.check_output(['where', 'pip'], env=env, universal_newlines=True).splitlines()[0].strip()
+ if where_pip.strip() != os.path.join(installation_path, 'Scripts', 'pip.exe'):
+ print("cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.", file=sys.stderr)
+ exit(1)
# prepare the Python environment
simple_shell(['python', '-m', 'pip', 'install', '--upgrade', 'pip'], env=env)
| diff --git a/test/05_environment/cibuildwheel_test.py b/test/05_environment/cibuildwheel_test.py
--- a/test/05_environment/cibuildwheel_test.py
+++ b/test/05_environment/cibuildwheel_test.py
@@ -1,5 +1,6 @@
import os
-
+import pytest
+import subprocess
import utils
@@ -17,3 +18,15 @@ def test():
# also check that we got the right wheels built
expected_wheels = utils.expected_wheels('spam', '0.1.0')
assert set(actual_wheels) == set(expected_wheels)
+
+
+def test_overridden_path(tmp_path):
+ project_dir = os.path.dirname(__file__)
+
+ # mess up PATH, somehow
+ with pytest.raises(subprocess.CalledProcessError):
+ utils.cibuildwheel_run(project_dir, output_dir=tmp_path, add_env={
+ 'CIBW_ENVIRONMENT': '''SOMETHING="$(mkdir new_path && touch new_path/python)" PATH="$(realpath new_path):$PATH"''',
+ 'CIBW_ENVIRONMENT_WINDOWS': '''SOMETHING="$(mkdir new_path && type nul > new_path/python.exe)" PATH="$CD\\new_path;$PATH"''',
+ })
+ assert len(os.listdir(str(tmp_path))) == 0
| Changing `PATH` in `CIBW_ENVIRONMENT` can cause wrong Python to be used
See https://github.com/samuelcolvin/rtoml/pull/8#issuecomment-580880863: if someone changes `PATH` in `CIBW_ENVIRONMENT`, the newly installed Python (from nuget on Windows, but I'm guessing the same is possible on macOS or Linux; still need to check) is not used, but some other Python version of the system is used.
So I think we should add some kind of check after applying `CIBW_ENVIRONMENT` that makes sure we're using the `python` executable we think we're using.
(I'm happy to do it myself, actually, if no one else is eager to do this; but I currently don't have time to do it immediately, so I thought I'd make sure to report it.)
On Linux there is no such problem because of the different order of command execution: the before-build step is evaluated before the PATH is extended with the Python location.
macOS has the same command order as Windows.
I think the solution used on Linux is best.
```bash
if [ ! -z {before_build} ]; then
PATH="$PYBIN:$PATH" sh -c {before_build}
fi
# Build the wheel
rm -rf /tmp/built_wheel
mkdir /tmp/built_wheel
PATH="$PYBIN:$PATH" "$PYBIN/pip" wheel . -w /tmp/built_wheel --no-deps {build_verbosity_flag}
built_wheel=(/tmp/built_wheel/*.whl)
```
> On Linux there is no such problem because of the different order of command execution: the before-build step is evaluated before the PATH is extended with the Python location.
Hmmmm, that's not consistent. I thought we'd made different platforms the same before.
> I think the solution used on Linux is best.
I don't know about that. It might be good to have access to the `python` that will be used when evaluating environment variables (e.g., `PYTHON_VERSION=$(python --version)` or so)? @joerick Any thoughts on the (implicitly) assumed order, here?
Actually, something weirder is going on: `CIBW_ENVIRONMENT` is only evaluated once (per architecture/manylinux image) on Linux, but it's reevaluated each time on macOS and Windows.
The documentation isn't clear on what's happening; it just says
> A space-separated list of environment variables to set during the build.
I would argue to do it for every build, after `python` is added to `PATH`, maybe after `pip` has been added (?), and before `CIBW_BEFORE_BUILD`. I would argue that the location of `python` is already part of the environment you're adapting.
But ... I don't really know. Maybe there are better arguments, so some input would be nice.
Afterthought: why do we do it before `CIBW_BEFORE_BUILD`? Actually, if you need environment variables in `CIBW_BEFORE_BUILD`, you can still set them in there. The main reason we need `CIBW_ENVIRONMENT` is because `CIBW_BEFORE_BUILD` is run in a subshell. So would it make sense if in some use case `CIBW_BEFORE_BUILD` prepares a dependency, after which `CIBW_ENVIRONMENT` adds it to the `PATH`/environment?
> I would argue to do it for every build, after python is added to PATH, maybe after pip has been added (?), and before CIBW_BEFORE_BUILD. I would argue that the location of python is already part of the environment you're adapting.
I think I agree with you here, @YannickJadoul; in fact, just today I noticed that Linux has different behaviour - by coincidence, that is fixed in #256 as a side effect of my changes.
It's important for the correct `python` and `pip` to be accessible at the top of the PATH, because user scripts or build systems might be using them. So while the old Linux 'belt and braces' approach seems more reliable, it could be masking problems that'll cause weirder errors down the line.
So I'm in favour of your original suggestion - asserting that python and pip are correct after CIBW_ENVIRONMENT is evaluated. And once #256 is merged, the behaviour should be consistent between platforms.
Thanks, @joerick. I'll make a PR; shouldn't take too long. If #256 gets merged before, it should be easy enough to rebase :-) | 2020-02-03T13:28:17 |
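The check agreed on above boils down to something like the following sketch. The use of `which` and the error wording are assumptions for illustration; the real implementation is the per-platform code in the patch.
```python
import subprocess
import sys


def assert_expected_python(expected_python, env):
    # after CIBW_ENVIRONMENT has been applied, make sure the `python` on PATH
    # is still the one cibuildwheel installed
    found = subprocess.check_output(
        ['which', 'python'], env=env, universal_newlines=True
    ).strip()
    if found != expected_python:
        print(
            "cibuildwheel: python on PATH doesn't match the installed instance. "
            "If you modified PATH, don't overwrite or shadow cibuildwheel's entry.",
            file=sys.stderr,
        )
        raise SystemExit(1)
```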
pypa/cibuildwheel | 273 | pypa__cibuildwheel-273 | [
"269"
] | 65792bd70094f452d1d230283f0054c913b53dba | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -160,6 +160,9 @@ def main():
manylinux_x86_64_image = os.environ.get('CIBW_MANYLINUX_X86_64_IMAGE', 'manylinux2010')
manylinux_i686_image = os.environ.get('CIBW_MANYLINUX_I686_IMAGE', 'manylinux2010')
manylinux_pypy_x86_64_image = os.environ.get('CIBW_MANYLINUX_PYPY_X86_64_IMAGE', 'manylinux2010')
+ manylinux_aarch64_image = os.environ.get('CIBW_MANYLINUX_AARCH64_IMAGE', 'manylinux2014')
+ manylinux_ppc64le_image = os.environ.get('CIBW_MANYLINUX_PPC64LE_IMAGE', 'manylinux2014')
+ manylinux_s390x_image = os.environ.get('CIBW_MANYLINUX_S390X_IMAGE', 'manylinux2014')
default_manylinux_images_x86_64 = {'manylinux1': 'quay.io/pypa/manylinux1_x86_64',
'manylinux2010': 'quay.io/pypa/manylinux2010_x86_64',
@@ -168,11 +171,18 @@ def main():
'manylinux2010': 'quay.io/pypa/manylinux2010_i686',
'manylinux2014': 'quay.io/pypa/manylinux2014_i686'}
default_manylinux_images_pypy_x86_64 = {'manylinux2010': 'pypywheels/manylinux2010-pypy_x86_64'}
+ default_manylinux_images_aarch64 = {'manylinux2014': 'quay.io/pypa/manylinux2014_aarch64'}
+ default_manylinux_images_ppc64le = {'manylinux2014': 'quay.io/pypa/manylinux2014_ppc64le'}
+ default_manylinux_images_s390x = {'manylinux2014': 'quay.io/pypa/manylinux2014_s390x'}
build_options.update(
manylinux_images={'x86_64': default_manylinux_images_x86_64.get(manylinux_x86_64_image) or manylinux_x86_64_image,
'i686': default_manylinux_images_i686.get(manylinux_i686_image) or manylinux_i686_image,
- 'pypy_x86_64': default_manylinux_images_pypy_x86_64.get(manylinux_pypy_x86_64_image) or manylinux_pypy_x86_64_image},
+ 'pypy_x86_64': default_manylinux_images_pypy_x86_64.get(manylinux_pypy_x86_64_image) or manylinux_pypy_x86_64_image,
+ 'aarch64': default_manylinux_images_aarch64.get(manylinux_aarch64_image) or manylinux_aarch64_image,
+ 'ppc64le': default_manylinux_images_ppc64le.get(manylinux_ppc64le_image) or manylinux_ppc64le_image,
+ 's390x': default_manylinux_images_s390x.get(manylinux_s390x_image) or manylinux_s390x_image,
+ },
)
elif platform == 'macos':
pass
diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -1,4 +1,5 @@
import os
+import platform
import shlex
import subprocess
import sys
@@ -12,6 +13,27 @@
)
+def matches_platform(identifier):
+ pm = platform.machine()
+ if pm == "x86_64":
+ # x86_64 machines can run i686 docker containers
+ if identifier.endswith('x86_64') or identifier.endswith('i686'):
+ return True
+ elif pm == "i686":
+ if identifier.endswith('i686'):
+ return True
+ elif pm == "aarch64":
+ if identifier.endswith('aarch64'):
+ return True
+ elif pm == "ppc64le":
+ if identifier.endswith('ppc64le'):
+ return True
+ elif pm == "s390x":
+ if identifier.endswith('s390x'):
+ return True
+ return False
+
+
def get_python_configurations(build_selector):
PythonConfiguration = namedtuple('PythonConfiguration', ['identifier', 'path'])
python_configurations = [
@@ -29,10 +51,21 @@ def get_python_configurations(build_selector):
PythonConfiguration(identifier='cp38-manylinux_i686', path='/opt/python/cp38-cp38'),
PythonConfiguration(identifier='pp27-manylinux_x86_64', path='/opt/python/pp27-pypy_73'),
PythonConfiguration(identifier='pp36-manylinux_x86_64', path='/opt/python/pp36-pypy36_pp73'),
+ PythonConfiguration(identifier='cp35-manylinux_aarch64', path='/opt/python/cp35-cp35m'),
+ PythonConfiguration(identifier='cp36-manylinux_aarch64', path='/opt/python/cp36-cp36m'),
+ PythonConfiguration(identifier='cp37-manylinux_aarch64', path='/opt/python/cp37-cp37m'),
+ PythonConfiguration(identifier='cp38-manylinux_aarch64', path='/opt/python/cp38-cp38'),
+ PythonConfiguration(identifier='cp35-manylinux_ppc64le', path='/opt/python/cp35-cp35m'),
+ PythonConfiguration(identifier='cp36-manylinux_ppc64le', path='/opt/python/cp36-cp36m'),
+ PythonConfiguration(identifier='cp37-manylinux_ppc64le', path='/opt/python/cp37-cp37m'),
+ PythonConfiguration(identifier='cp38-manylinux_ppc64le', path='/opt/python/cp38-cp38'),
+ PythonConfiguration(identifier='cp35-manylinux_s390x', path='/opt/python/cp35-cp35m'),
+ PythonConfiguration(identifier='cp36-manylinux_s390x', path='/opt/python/cp36-cp36m'),
+ PythonConfiguration(identifier='cp37-manylinux_s390x', path='/opt/python/cp37-cp37m'),
+ PythonConfiguration(identifier='cp38-manylinux_s390x', path='/opt/python/cp38-cp38'),
]
-
# skip builds as required
- return [c for c in python_configurations if build_selector(c.identifier)]
+ return [c for c in python_configurations if matches_platform(c.identifier) and build_selector(c.identifier)]
def build(project_dir, output_dir, test_command, test_requires, test_extras, before_build, build_verbosity, build_selector, repair_command, environment, manylinux_images):
@@ -49,6 +82,9 @@ def build(project_dir, output_dir, test_command, test_requires, test_extras, bef
platforms = [
('cp', 'manylinux_x86_64', manylinux_images['x86_64']),
('cp', 'manylinux_i686', manylinux_images['i686']),
+ ('cp', 'manylinux_aarch64', manylinux_images['aarch64']),
+ ('cp', 'manylinux_ppc64le', manylinux_images['ppc64le']),
+ ('cp', 'manylinux_s390x', manylinux_images['s390x']),
('pp', 'manylinux_x86_64', manylinux_images['pypy_x86_64']),
]
| diff --git a/test/01_basic/cibuildwheel_test.py b/test/01_basic/cibuildwheel_test.py
--- a/test/01_basic/cibuildwheel_test.py
+++ b/test/01_basic/cibuildwheel_test.py
@@ -1,4 +1,5 @@
import os
+import platform
import utils
@@ -20,7 +21,10 @@ def test_build_identifiers():
# after adding CIBW_MANYLINUX_IMAGE to support manylinux2010, there
# can be multiple wheels for each wheel, though, so we need to limit
# the expected wheels
- expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
- if '-manylinux' not in w or '-manylinux1' in w]
+ if platform.machine() in ['x86_64', 'i686']:
+ expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
+ if '-manylinux' not in w or '-manylinux1' in w]
+ else:
+ expected_wheels = utils.expected_wheels('spam', '0.1.0')
build_identifiers = utils.cibuildwheel_get_build_identifiers(project_dir)
assert len(expected_wheels) == len(build_identifiers)
diff --git a/test/06_docker_images/cibuildwheel_test.py b/test/06_docker_images/cibuildwheel_test.py
--- a/test/06_docker_images/cibuildwheel_test.py
+++ b/test/06_docker_images/cibuildwheel_test.py
@@ -1,4 +1,5 @@
import os
+import platform
import pytest
@@ -10,6 +11,8 @@ def test():
if utils.platform != 'linux':
pytest.skip('the test is only relevant to the linux build')
+ if platform.machine() not in ['x86_64', 'i686']:
+ pytest.skip('this test is currently only possible on x86_64/i686 due to availability of alternative images')
actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
'CIBW_MANYLINUX_X86_64_IMAGE': 'dockcross/manylinux2010-x64',
diff --git a/test/08_manylinuxXXXX_only/cibuildwheel_test.py b/test/08_manylinuxXXXX_only/cibuildwheel_test.py
--- a/test/08_manylinuxXXXX_only/cibuildwheel_test.py
+++ b/test/08_manylinuxXXXX_only/cibuildwheel_test.py
@@ -1,4 +1,5 @@
import os
+import platform
import pytest
@@ -11,6 +12,9 @@ def test(manylinux_image):
if utils.platform != 'linux':
pytest.skip('the docker test is only relevant to the linux build')
+ elif platform.machine not in ['x86_64', 'i686']:
+ if manylinux_image in ['manylinux1', 'manylinux2010']:
+ pytest.skip("manylinux1 and 2010 doesn't exist for non-x86 architectures")
# build the wheels
# CFLAGS environment variable is necessary to fail on 'malloc_info' (on manylinux1) during compilation/linking,
@@ -20,6 +24,9 @@ def test(manylinux_image):
'CIBW_MANYLINUX_X86_64_IMAGE': manylinux_image,
'CIBW_MANYLINUX_I686_IMAGE': manylinux_image,
'CIBW_MANYLINUX_PYPY_X86_64_IMAGE': manylinux_image,
+ 'CIBW_MANYLINUX_AARCH64_IMAGE': manylinux_image,
+ 'CIBW_MANYLINUX_PPC64LE_IMAGE': manylinux_image,
+ 'CIBW_MANYLINUX_S390X_IMAGE': manylinux_image,
}
if manylinux_image == 'manylinux1':
# We don't have a manylinux1 image for PyPy
diff --git a/test/shared/utils.py b/test/shared/utils.py
--- a/test/shared/utils.py
+++ b/test/shared/utils.py
@@ -5,6 +5,7 @@
'''
import os
+import platform as pm
import shutil
import subprocess
import sys
@@ -70,7 +71,7 @@ def cibuildwheel_run(project_path, env=None, add_env=None, output_dir=None):
return wheels
-def expected_wheels(package_name, package_version, manylinux_versions=['manylinux1', 'manylinux2010'],
+def expected_wheels(package_name, package_version, manylinux_versions=None,
macosx_deployment_target=None):
'''
Returns a list of expected wheels from a run of cibuildwheel.
@@ -79,11 +80,21 @@ def expected_wheels(package_name, package_version, manylinux_versions=['manylinu
# {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
# {python tag} and {abi tag} are closely related to the python interpreter used to build the wheel
# so we'll merge them below as python_abi_tag
- python_abi_tags = ['cp27-cp27m', 'cp35-cp35m', 'cp36-cp36m', 'cp37-cp37m', 'cp38-cp38',
- 'pp27-pypy_73', 'pp36-pypy36_pp73']
+
+ python_abi_tags = ['cp35-cp35m', 'cp36-cp36m', 'cp37-cp37m', 'cp38-cp38']
+ extra_x86_python_abi_tags = ['cp27-cp27m', 'pp27-pypy_73', 'pp36-pypy36_pp73']
+
if platform == 'linux':
- python_abi_tags.append('cp27-cp27mu') # python 2.7 has 2 different ABI on manylinux
- architectures = {'cp': ['x86_64', 'i686'], 'pp': ['x86_64']}
+ if pm.machine() not in ['x86_64', 'i686']:
+ if manylinux_versions is None:
+ manylinux_versions = ['manylinux2014']
+ architectures = {'cp': [pm.machine()]}
+ else:
+ if manylinux_versions is None:
+ manylinux_versions = ['manylinux1', 'manylinux2010']
+ python_abi_tags += extra_x86_python_abi_tags
+ python_abi_tags.append('cp27-cp27mu') # python 2.7 has 2 different ABI on manylinux
+ architectures = {'cp': ['x86_64', 'i686'], 'pp': ['x86_64']}
platform_tags = {}
for python_implemention in architectures:
platform_tags[python_implemention] = [
@@ -96,12 +107,15 @@ def expected_wheels(package_name, package_version, manylinux_versions=['manylinu
def get_platform_tags(python_abi_tag):
return platform_tags[python_abi_tag[:2]]
elif platform == 'windows':
+ python_abi_tags += extra_x86_python_abi_tags
platform_tags = {'cp': ['win32', 'win_amd64'], 'pp': ['win32']}
def get_platform_tags(python_abi_tag):
return platform_tags[python_abi_tag[:2]]
elif platform == 'macos':
+ python_abi_tags += extra_x86_python_abi_tags
+
def get_platform_tags(python_abi_tag):
default_version = '10.7' if python_abi_tag.startswith('pp') else '10.9'
return ['macosx_{}_x86_64'.format((macosx_deployment_target or default_version).replace('.', '_'))]
| Add support for non-x86 architectures with manylinux2014
With manylinux2014 there is support for publishing wheels on non-x86 architectures. There are docker images available for all the supported architectures (https://github.com/pypa/manylinux#manylinux2014), and Travis supports running CI on these architectures too: https://docs.travis-ci.com/user/multi-cpu-architectures/. Having options available to specify non-x86 builds and docker images would be great for projects that want to support users on other architectures.
| Totally agree this would be great! By the way, nothing in the current manylinux build is specific to x86, so I tried one on the manylinux2014-aarch64 (arm64) build. It [ran, but didn't get very far](https://travis-ci.org/joerick/cibuildwheel-autopypi-example/builds/648057310); I guess the paths to each Python are different on those images.
PRs welcome! If anyone wants to take a crack at this, let's chat API and build identifiers before you start :)
EDIT: building only Python 3.8 gets around the python path issue above - [it seems to work](https://travis-ci.org/joerick/cibuildwheel-autopypi-example)! so changes would hopefully be pretty minimal :)
I think I might have time to take a crack at this over the next week or so. For the new identifiers I was thinking of just following the same pattern: `cp*-manylinux_aarch64`, `cp*-manylinux_ppc64le` and `cp*-manylinux_s390x`. Looking at the [manylinux2014 spec](https://www.python.org/dev/peps/pep-0599/#the-manylinux2014-policy), there is also support for `armv7l` and `ppc64` (big endian), but without a docker image or host env in CI for either, I'm not sure we'll be able to support those.
I was thinking of also adding options for the docker image: `CIBW_MANYLINUX_AARCH64_IMAGE`, `CIBW_MANYLINUX_PPC64LE_IMAGE`, and `CIBW_MANYLINUX_S390X_IMAGE`.
@mtreinish That would be amazing. I agree with those names; they're the same as in the manylinux standard, so no need to make up something else if there's no specific reason :-)
PyPy also supports some of these architectures, btw: https://www.pypy.org/download.html#python2-7-compatible-pypy-7-3-0. Though I don't think they have manylinux images :-/ | 2020-02-20T18:56:47 |
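To illustrate the option names proposed above, here is a rough sketch of how the per-architecture image variables resolve to concrete docker images, following the same alias-or-raw-name pattern the existing x86_64/i686 options use. The image names mirror the patch; the helper itself is only illustrative.
```python
import os

DEFAULT_IMAGES = {
    'aarch64': {'manylinux2014': 'quay.io/pypa/manylinux2014_aarch64'},
    'ppc64le': {'manylinux2014': 'quay.io/pypa/manylinux2014_ppc64le'},
    's390x': {'manylinux2014': 'quay.io/pypa/manylinux2014_s390x'},
}


def resolve_image(arch):
    # a known alias ('manylinux2014') maps to the default image;
    # any other value is treated as a raw docker image name
    value = os.environ.get('CIBW_MANYLINUX_{}_IMAGE'.format(arch.upper()), 'manylinux2014')
    return DEFAULT_IMAGES[arch].get(value, value)


print(resolve_image('aarch64'))  # quay.io/pypa/manylinux2014_aarch64
```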
pypa/cibuildwheel | 389 | pypa__cibuildwheel-389 | [
"388"
] | 41891277d33c728059fb89ac2ce0c2b32d6d459b | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -212,7 +212,7 @@ def build(options: BuildOptions) -> None:
repaired_wheel_dir.mkdir(parents=True)
if built_wheel.name.endswith('none-any.whl') or not options.repair_command:
# pure Python wheel or empty repair command
- built_wheel.rename(repaired_wheel_dir / built_wheel.name)
+ shutil.move(str(built_wheel), repaired_wheel_dir)
else:
repair_command_prepared = prepare_command(options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir)
call(repair_command_prepared, env=env, shell=True)
@@ -262,4 +262,4 @@ def build(options: BuildOptions) -> None:
shutil.rmtree(venv_dir)
# we're all done here; move it to output (overwrite existing)
- repaired_wheel.replace(options.output_dir / repaired_wheel.name)
+ shutil.move(str(repaired_wheel), options.output_dir)
diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -229,7 +229,7 @@ def build(options: BuildOptions) -> None:
repaired_wheel_dir.mkdir(parents=True)
if built_wheel.name.endswith('none-any.whl') or not options.repair_command:
# pure Python wheel or empty repair command
- built_wheel.rename(repaired_wheel_dir / built_wheel.name)
+ shutil.move(str(built_wheel), repaired_wheel_dir)
else:
repair_command_prepared = prepare_command(options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir)
shell([repair_command_prepared], env=env)
@@ -283,4 +283,4 @@ def build(options: BuildOptions) -> None:
shutil.rmtree(venv_dir)
# we're all done here; move it to output (remove if already exists)
- repaired_wheel.replace(options.output_dir / repaired_wheel.name)
+ shutil.move(str(repaired_wheel), options.output_dir)
| OSError: [WinError 17] The system cannot move the file to a different disk drive
```
File "C:\hostedtoolcache\windows\Python\3.7.7\x64\lib\site-packages\cibuildwheel\windows.py", line 286, in build
repaired_wheel.replace(options.output_dir / repaired_wheel.name)
File "C:\hostedtoolcache\windows\Python\3.7.7\x64\lib\pathlib.py", line 1338, in replace
self._accessor.replace(self, target)
OSError: [WinError 17] The system cannot move the file to a different disk drive: 'C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\cibuildwheeli4q9t16s\\repaired_wheel\\OpenPIV-0.21.9a0-cp36-cp36m-win_amd64.whl' -> 'wheelhouse\\OpenPIV-0.21.9a0-cp36-cp36m-win_amd64.whl'
##[error]Process completed with exit code 1.
```
| Which CI?
@joerick So it looks like shutil move should be used again.
@alexlib downgrade to version 1.4.2 until 1.5.1 will be released
Github actions
suggested to change this line
https://github.com/joerick/cibuildwheel/blob/41891277d33c728059fb89ac2ce0c2b32d6d459b/cibuildwheel/windows.py#L286
to
https://docs.python.org/3/library/shutil.html#shutil.move
similar to the same bug noticed here https://github.com/k4yt3x/video2x/issues/249
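For clarity, the difference being pointed out is roughly the following (hypothetical helper; the real change is the one-line swap in the patch). `Path.replace()` maps to `os.replace()`, which on Windows cannot move a file to a different drive, while `shutil.move()` falls back to copy-and-delete.
```python
import shutil
from pathlib import Path


def move_wheel(repaired_wheel: Path, output_dir: Path) -> None:
    # fails with WinError 17 when the temp dir and output dir are on different drives:
    #   repaired_wheel.replace(output_dir / repaired_wheel.name)
    # copies and deletes when a plain rename isn't possible (str() mirrors the patch):
    shutil.move(str(repaired_wheel), str(output_dir))
```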
I wrote about this. I do not understand why the current test set does not catch this.
https://github.com/joerick/cibuildwheel/runs/802745957?check_suite_focus=true
This is a result of the refactoring from #376.
|
pypa/cibuildwheel | 408 | pypa__cibuildwheel-408 | [
"406"
] | c884530a1da933814eaaaa9fa2db605e8ecaf116 | diff --git a/cibuildwheel/bashlex_eval.py b/cibuildwheel/bashlex_eval.py
--- a/cibuildwheel/bashlex_eval.py
+++ b/cibuildwheel/bashlex_eval.py
@@ -1,15 +1,14 @@
-import shlex
import subprocess
from typing import Callable, Dict, List, NamedTuple, Optional, Sequence
import bashlex # type: ignore
-# a function that takes a shell command and the environment, and returns the result
-EnvironmentExecutor = Callable[[str, Dict[str, str]], str]
+# a function that takes a command and the environment, and returns the result
+EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]
-def local_environment_executor(command: str, env: Dict[str, str]) -> str:
- return subprocess.check_output(shlex.split(command), env=env, universal_newlines=True)
+def local_environment_executor(command: List[str], env: Dict[str, str]) -> str:
+ return subprocess.check_output(command, env=env, universal_newlines=True)
class NodeExecutionContext(NamedTuple):
@@ -97,8 +96,7 @@ def evaluate_nodes_as_compound_command(nodes: Sequence[bashlex.ast.node], contex
def evaluate_nodes_as_simple_command(nodes: List[bashlex.ast.node], context: NodeExecutionContext):
- words = [evaluate_node(part, context=context) for part in nodes]
- command = ' '.join(words)
+ command = [evaluate_node(part, context=context) for part in nodes]
return context.executor(command, context.environment)
diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -180,9 +180,9 @@ def get_environment(self) -> Dict[str, str]:
'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
], capture_output=True))
- def environment_executor(self, command: str, environment: Dict[str, str]) -> str:
+ def environment_executor(self, command: List[str], environment: Dict[str, str]) -> str:
# used as an EnvironmentExecutor to evaluate commands and capture output
- return self.call(shlex.split(command), env=environment)
+ return self.call(command, env=environment)
def shell_quote(path: PurePath) -> str:
| diff --git a/unit_test/environment_test.py b/unit_test/environment_test.py
--- a/unit_test/environment_test.py
+++ b/unit_test/environment_test.py
@@ -40,7 +40,7 @@ def test_inheritance():
def test_shell_eval():
- environment_recipe = parse_environment('VAR="$(echo "a test" string)"')
+ environment_recipe = parse_environment('VAR="$(echo "a test" string)"')
env_copy = os.environ.copy()
env_copy.pop('VAR', None)
@@ -50,8 +50,8 @@ def test_shell_eval():
)
environment_cmds = environment_recipe.as_shell_commands()
- assert environment_dict['VAR'] == 'a test string'
- assert environment_cmds == ['export VAR="$(echo "a test" string)"']
+ assert environment_dict['VAR'] == 'a test string'
+ assert environment_cmds == ['export VAR="$(echo "a test" string)"']
def test_shell_eval_and_env():
| CIBW_ENVIRONMENT broken in 1.5.3
Somehow, my line of `export CIBW_ENVIRONMENT='... CCACHE_BASEDIR=`python -c "import tempfile; import os; print(os.path.realpath(tempfile.gettempdir()))"` ...'` (see https://github.com/YannickJadoul/Parselmouth/blob/1f3dd9abc63afdf4a1d26db40d210bb4187118a8/.travis.yml#L143; yes, I know I ought to clean this up) breaks:
```
+ /bin/true
+ mkdir -p /project
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ uname -i
x86_64
+ python -c import 'tempfile;' import 'os;' 'print(os.path.realpath(tempfile.gettempdir()))'
File "<string>", line 1
import
```
See https://travis-ci.org/github/YannickJadoul/Parselmouth/jobs/709679114#L392
I'll start investigating, but is this perhaps related to #403, @joerick?
| OK, got it.
Simplified example:
```python
from cibuildwheel import environment
print(environment.parse_environment('X=`python -c "import antigravity"`').as_dictionary({}))
```
What's going wrong is that `node.word` is not the same as `context.input[part.pos[0]:part.pos[1]]` (the latter includes the quotes, the first one doesn't), so cibuildwheel loses the quotes and ends up with `python -c import antigravity`, which gets split by `shlex.split` into 4 parts rather than 3.
Let's see if I can make a PR. | 2020-07-19T15:21:01 |
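A short illustration of the quoting problem described above (just the `shlex` behaviour, not cibuildwheel's actual code path):
```python
import shlex

original = 'python -c "import antigravity"'
dequoted = 'python -c import antigravity'  # what's left once the quotes are dropped

print(shlex.split(original))  # ['python', '-c', 'import antigravity'] -> 3 parts
print(shlex.split(dequoted))  # ['python', '-c', 'import', 'antigravity'] -> 4 parts
```
The patch above sidesteps the round-trip entirely: the already-evaluated word list is passed to the executor instead of being joined into a string and re-split.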