repo (string) | pull_number (int64) | instance_id (string) | issue_numbers (sequence) | base_commit (string) | patch (string) | test_patch (string) | problem_statement (string) | hints_text (string) | created_at (timestamp[s]) |
---|---|---|---|---|---|---|---|---|---|
pypa/cibuildwheel | 410 | pypa__cibuildwheel-410 | [
"409"
] | 726bcdf372daa2ad2ae9a67fd090ebadcbd964db | diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -27,12 +27,14 @@ class DockerContainer:
bash_stdin: IO[bytes]
bash_stdout: IO[bytes]
- def __init__(self, docker_image: str, simulate_32_bit=False):
+ def __init__(self, docker_image: str, simulate_32_bit: bool = False, cwd: Optional[Union[str, PathLike]] = None):
self.docker_image = docker_image
self.simulate_32_bit = simulate_32_bit
+ self.cwd = cwd
def __enter__(self) -> 'DockerContainer':
self.name = f'cibuildwheel-{uuid.uuid4()}'
+ cwd_args = ['-w', str(self.cwd)] if self.cwd else []
shell_args = ['linux32', '/bin/bash'] if self.simulate_32_bit else ['/bin/bash']
subprocess.run(
[
@@ -41,6 +43,7 @@ def __enter__(self) -> 'DockerContainer':
'--name', self.name,
'-i',
'-v', '/:/host', # ignored on CircleCI
+ *cwd_args,
self.docker_image,
*shell_args
],
diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -110,8 +110,8 @@ def build(options: BuildOptions) -> None:
continue
try:
- with DockerContainer(docker_image, simulate_32_bit=platform_tag.endswith('i686')) as docker:
- docker.copy_into(Path.cwd(), Path('/project'))
+ with DockerContainer(docker_image, simulate_32_bit=platform_tag.endswith('i686'), cwd='/project') as docker:
+ docker.copy_into(Path.cwd(), PurePath('/project'))
if options.before_all:
env = docker.get_environment()
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -17,6 +17,7 @@ jobs:
matrix:
os: [ubuntu-18.04, windows-latest, macos-latest]
python_version: ['3.7']
+ timeout-minutes: 180
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
diff --git a/test/test_before_all.py b/test/test_before_all.py
--- a/test/test_before_all.py
+++ b/test/test_before_all.py
@@ -1,4 +1,5 @@
-import os
+import pytest
+import subprocess
import textwrap
from . import utils
@@ -21,7 +22,7 @@ def test(tmp_path):
project_dir = tmp_path / 'project'
project_with_before_build_asserts.generate(project_dir)
- with open(os.path.join(project_dir, "text_info.txt"), mode='w') as ff:
+ with (project_dir / 'text_info.txt').open(mode='w') as ff:
print("dummy text", file=ff)
# build the wheels
@@ -33,6 +34,30 @@ def test(tmp_path):
})
# also check that we got the right wheels
- os.remove(os.path.join(project_dir, "text_info.txt"))
+ (project_dir / 'text_info.txt').unlink()
+ expected_wheels = utils.expected_wheels('spam', '0.1.0')
+ assert set(actual_wheels) == set(expected_wheels)
+
+
+def test_failing_command(tmp_path):
+ project_dir = tmp_path / 'project'
+ test_projects.new_c_project().generate(project_dir)
+
+ with pytest.raises(subprocess.CalledProcessError):
+ utils.cibuildwheel_run(project_dir, add_env={
+ 'CIBW_BEFORE_ALL': 'false',
+ 'CIBW_BEFORE_ALL_WINDOWS': 'exit /b 1',
+ })
+
+
+def test_cwd(tmp_path):
+ project_dir = tmp_path / 'project'
+ test_projects.new_c_project().generate(project_dir)
+
+ actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
+ 'CIBW_BEFORE_ALL': f'''python -c "import os; assert os.getcwd() == {str(project_dir)!r}"''',
+ 'CIBW_BEFORE_ALL_LINUX': '''python -c "import os; assert os.getcwd() == '/project'"''',
+ })
+
expected_wheels = utils.expected_wheels('spam', '0.1.0')
assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/test_before_build.py b/test/test_before_build.py
--- a/test/test_before_build.py
+++ b/test/test_before_build.py
@@ -1,3 +1,5 @@
+import pytest
+import subprocess
import textwrap
from . import utils
@@ -32,14 +34,41 @@ def test(tmp_path):
project_dir = tmp_path / 'project'
project_with_before_build_asserts.generate(project_dir)
+ before_build = ('''python -c "import sys; open('{output_dir}pythonversion.txt', 'w').write(sys.version)" && '''
+ '''python -c "import sys; open('{output_dir}pythonexecutable.txt', 'w').write(sys.executable)"''')
+
# build the wheels
actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
# write python version information to a temporary file, this is
# checked in setup.py
- 'CIBW_BEFORE_BUILD': '''python -c "import sys; open('/tmp/pythonversion.txt', 'w').write(sys.version)" && python -c "import sys; open('/tmp/pythonexecutable.txt', 'w').write(sys.executable)"''',
- 'CIBW_BEFORE_BUILD_WINDOWS': '''python -c "import sys; open('c:\\pythonversion.txt', 'w').write(sys.version)" && python -c "import sys; open('c:\\pythonexecutable.txt', 'w').write(sys.executable)"''',
+ 'CIBW_BEFORE_BUILD': before_build.format(output_dir='/tmp/'),
+ 'CIBW_BEFORE_BUILD_WINDOWS': before_build.format(output_dir=r'c:\\'),
})
# also check that we got the right wheels
expected_wheels = utils.expected_wheels('spam', '0.1.0')
assert set(actual_wheels) == set(expected_wheels)
+
+
+def test_failing_command(tmp_path):
+ project_dir = tmp_path / 'project'
+ test_projects.new_c_project().generate(project_dir)
+
+ with pytest.raises(subprocess.CalledProcessError):
+ utils.cibuildwheel_run(project_dir, add_env={
+ 'CIBW_BEFORE_BUILD': 'false',
+ 'CIBW_BEFORE_BUILD_WINDOWS': 'exit /b 1',
+ })
+
+
+def test_cwd(tmp_path):
+ project_dir = tmp_path / 'project'
+ test_projects.new_c_project().generate(project_dir)
+
+ actual_wheels = utils.cibuildwheel_run(project_dir, add_env={
+ 'CIBW_BEFORE_BUILD': f'''python -c "import os; assert os.getcwd() == {str(project_dir)!r}"''',
+ 'CIBW_BEFORE_BUILD_LINUX': '''python -c "import os; assert os.getcwd() == '/project'"''',
+ })
+
+ expected_wheels = utils.expected_wheels('spam', '0.1.0')
+ assert set(actual_wheels) == set(expected_wheels)
diff --git a/unit_test/docker_container_test.py b/unit_test/docker_container_test.py
--- a/unit_test/docker_container_test.py
+++ b/unit_test/docker_container_test.py
@@ -41,6 +41,13 @@ def test_environment():
assert container.call(['sh', '-c', 'echo $TEST_VAR'], env={'TEST_VAR': '1'}, capture_output=True) == '1\n'
[email protected]
+def test_cwd():
+ with DockerContainer(DEFAULT_IMAGE, cwd='/cibuildwheel/working_directory') as container:
+ assert container.call(['pwd'], capture_output=True) == '/cibuildwheel/working_directory\n'
+ assert container.call(['pwd'], capture_output=True, cwd='/opt') == '/opt\n'
+
+
@pytest.mark.docker
def test_container_removed():
with DockerContainer(DEFAULT_IMAGE) as container:
| The "/project/" path isn't there anymore for Linux in version 1.5.3
Upgraded from 1.5.1 to 1.5.3 in a branch here: https://github.com/Chia-Network/bls-signatures/tree/up-cibuildwheel
In 1.5.1 we would install cmake into `/project/cmake-3.17.3-Linux-x86_64` and build various dependencies from there as we add `/project/cmake-3.17.3-Linux-x86_64` to the front of PATH:
https://github.com/Chia-Network/bls-signatures/runs/880546025?check_suite_focus=true#step:10:299
In 1.5.3 cmake gets installed into `//cmake-3.17.3-Linux-x86_64` so the PATH we preset doesn't work and the build fails.
https://github.com/Chia-Network/bls-signatures/runs/887691441?check_suite_focus=true#step:10:284
Operative workflow here: https://github.com/Chia-Network/bls-signatures/blob/up-cibuildwheel/.github/workflows/build.yml
I can work around but seems like either an issue that needs a fix or some mention in documentation. I'm sure it came out of the move to docker - which otherwise looks cool.
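For reference, a minimal sketch (editor's illustration, not cibuildwheel's actual code) of the approach the patch above takes: an optional working directory becomes a `-w` argument on the `docker create` command line, so commands later run in the container start from that directory. The image and container names below are made up.
```python
# Illustration only: build a `docker create` argument list with an
# optional working directory, mirroring the `cwd_args` idea in the patch.
from typing import List, Optional


def docker_create_args(image: str, name: str, cwd: Optional[str] = None) -> List[str]:
    cwd_args = ["-w", cwd] if cwd else []
    return ["docker", "create", "--name", name, "-i", *cwd_args, image, "/bin/bash"]


print(docker_create_args("quay.io/pypa/manylinux2010_x86_64", "cibuildwheel-example", cwd="/project"))
```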
| Confirmed the same behavior with version 1.5.4 as well.
While you're at it: how easy is it to try 1.5.2? It would be nice to pinpoint the exact release :-)
Trivial - give me 5 minutes.
Thanks. I assume it's going to be #386, so that would mean 1.5.2 should still be OK.
Indeed 1.5.2 works just fine - https://github.com/Chia-Network/bls-signatures/runs/887756210?check_suite_focus=true#step:10:297
OK, then I think that the problem is that `CIBW_BEFORE_BUILD` etc isn't executed inside of `/project` or with `/project` as `cwd`. Could you check this, @joerick?
I guess that would mean that passing `--prefix=/project` would solve your problem, @hoffmang9? Running `pwd` would confirm whether that would work as workaround.
But this seems like a regression (or at least an unintended change), so let me see if I can make a PR for this.
Queued up to push was a quick `echo $PATH` and pwd for as soon as the link above completed.
Two interesting things.
First, pwd is `/`, so definitely not `/project` as it has historically been (or we've been luckily misusing it...)
Second, our trick to find the cmake path -
```
CIBW_ENVIRONMENT_LINUX: "PATH=/project/cmake-3.17.3-Linux-`uname -m`/bin:$PATH"
```
no longer works in 1.5.4. Not critical but interesting.
> Second, our trick to find the cmake path -
>
> ```
> CIBW_ENVIRONMENT_LINUX: "PATH=/project/cmake-3.17.3-Linux-`uname -m`/bin:$PATH"
> ```
>
> no longer works in 1.5.4. Not critical but interesting.
Well, that's the same issue, or do you mean `x86_64` isn't interpolated anymore? Again, we just changed some things (#408 and #403), so that's also possible. Do you have a link, perhaps?
> OK, then I think that the problem is that CIBW_BEFORE_BUILD etc isn't executed inside of /project or with /project as pwd. Could you check this, @joerick?
Erm, wow, okay! That's a pretty big regression! My apologies. I can't deal with it now, and I probably won't have time tomorrow either, so I'm thinking the best thing might be to pull 1.5.4 and 1.5.3 from PyPI for now, until we can look at these regressions properly. Would that be okay with you @YannickJadoul ?
> Two interesting things.
>
>
>
> First, pwd is `/`, so definitely not `/project` as it has historically been (or we've been luckily misusing it...)
>
No, cwd should be /project , as you had assumed :) that's a bug
>
>
> Second, our trick to find the cmake path -
>
> ```
>
> CIBW_ENVIRONMENT_LINUX: "PATH=/project/cmake-3.17.3-Linux-`uname -m`/bin:$PATH"
>
> ```
>
> no longer works in 1.5.4. Not critical but interesting.
Also sounds like a bug!
> Would that be okay with you @YannickJadoul ?
I'll leave version management up to you, actually. It's not thát broken, because we pass the absolute path to `pip wheel`. Not sure it's worth pulling things of PyPI for, but again... up to you.
I was just looking, and it should be as easy as adding a few times `cwd='/project'` (or similar) to the `docker.call` calls. And a few tests. If this is intended (just saw your next comment pop up, so it was intended), I can quickly make a PR, still?
> If this is intended (just saw your next comment pop up, so it was intended)
Also, `CIBW_BEFORE_ALL` wás not executed from `/project` before. Was that intended, or should it also be run from `/project` for consistency?
@hoffmang9 After a quick look at the code I see one comment: ` # don't build i686 targets, can't seem to find cmake for these`. You can get cmake from https://pypi.org/project/cmake/#files. Then you need to play a little with PATH, but it should work.
Second thing is that you should use `CIBW_BEFORE_BUILD` for external dependencies (like cmake).
@YannickJadoul @joerick Looking at this project, `{project}` substitution should also be applied to the environment.
I think that this line contains a bug:
https://github.com/joerick/cibuildwheel/blob/c0cb43800b6740677e373889705b61b17b879a8d/cibuildwheel/docker_container.py#L119
It should be:
`chdir = f'cd {cwd}' if cwd else 'cd /project'`
FYI I've yanked those versions.
> I was just looking, and it should be as easy as adding a few times cwd='/project' (or similar) to the docker.call calls. And a few tests. If this is intended (just saw your next comment pop up, so it was intended), I can quickly make a PR, still?
Please do!
And BEFORE_ALL should be from /project, too.
> Second thing is that you should use `CIBW_BEFORE_BUILD` for external dependencies (like cmake).
He is doing that, I saw. `CIBW_BEFORE_ALL` could be nicer, even, doing it only once?
> I think that this line contains bug
>
> https://github.com/joerick/cibuildwheel/blob/c0cb43800b6740677e373889705b61b17b879a8d/cibuildwheel/docker_container.py#L119
No, this should be out of `docker_container.py`. Nothing else in there refers to the specific situation we use it for, so this should probably also be in `linux.py`.
> @hoffmang9 After a quick look at the code I see one comment: ` # don't build i686 targets, can't seem to find cmake for these`. You can get cmake from https://pypi.org/project/cmake/#files. Then you need to play a little with PATH, but it should work.
That comment is ancient and wrong. We only build on x86_64 for uint128 reasons...
> Second thing is that you should use `CIBW_BEFORE_BUILD` for external dependencies (like cmake).
Only linux needs cmake installed so we're using CIBW_BEFORE_BUILD_LINUX. Building before everything would blow up MacOS and Windows.
Now that does lead me to mention the strong desire to be able to cache a phase of builds before going on to cp38 - but that's an enhancement for another issue... Installing cmake and compiling gmp and sodium twice - is no fun...
@YannickJadoul @hoffmang9 My bad. Yes.
Instead of using CIBW_BEFORE_BUILD_LINUX you should use CIBW_BEFORE_ALL_LINUX.
The thanks I have for the suggestion goes way beyond just a heart emoji! | 2020-07-19T22:47:19 |
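As a quick way to confirm the behaviour discussed in this thread, a command like the following can be dropped into `CIBW_BEFORE_BUILD` or `CIBW_BEFORE_ALL`; it only prints where the step runs and whether `/project` exists. This is an editor's sketch, not part of the patch above.
```python
# Diagnostic sketch: report the working directory a build step runs from.
import os

cwd = os.getcwd()
print("build step cwd:", cwd)
print("/project exists:", os.path.isdir("/project"))
```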
pypa/cibuildwheel | 420 | pypa__cibuildwheel-420 | [
"421"
] | 43abc3525904620565f026e79869efd47bdd5bf6 | diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -177,8 +177,14 @@ def pep_518_cp35_workaround(package_dir: Path, env: Dict[str, str]) -> None:
if 'build-system' in data
else []
)
+
if requirements:
- shell(['pip', 'install'] + requirements, env=env)
+ with tempfile.TemporaryDirectory() as d:
+ reqfile = Path(d) / "requirements.txt"
+ with reqfile.open("w") as f:
+ for r in requirements:
+ print(r, file=f)
+ call(['pip', 'install', '-r', reqfile], env=env)
def build(options: BuildOptions) -> None:
| diff --git a/test/test_pep518.py b/test/test_pep518.py
--- a/test/test_pep518.py
+++ b/test/test_pep518.py
@@ -1,6 +1,7 @@
import textwrap
from . import test_projects
from . import utils
+import os
basic_project = test_projects.new_c_project(
setup_py_add=textwrap.dedent(
@@ -27,6 +28,7 @@
[build-system]
requires = [
"setuptools >= 42",
+ "setuptools_scm[toml]>=4.1.2",
"wheel",
"requests==2.22.0; python_version<'3.6'",
"requests==2.23.0; python_version>='3.6'"
@@ -47,3 +49,10 @@ def test_pep518(tmp_path):
# check that the expected wheels are produced
expected_wheels = utils.expected_wheels("spam", "0.1.0")
assert set(actual_wheels) == set(expected_wheels)
+
+ # These checks ensure an extra file is not created when using custom
+ # workaround; see https://github.com/joerick/cibuildwheel/issues/421
+ assert not (project_dir / "42").exists()
+ assert not (project_dir / "4.1.2").exists()
+
+ assert len(os.listdir(project_dir)) == len(basic_project.files)
| [BUG] Pyproject.toml workaround can make a file
The following pyproject.toml on Windows:
```toml
requires = [
"setuptools_scm[toml]>=4.1.2"
]
```
produces a file
```
4.1.2
```
which breaks the setuptools_scm versioning, since it's now a dirty directory. Illustrated in #420. I think it used to work originally, as I made a few releases after the Windows 3.5 patch initially went in.
This _only_ breaks if there is a bracketed requirement.
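A standalone sketch of the workaround in the patch above: writing the PEP 518 requirements to a temporary `requirements.txt` and passing it to `pip install -r` keeps specifiers like `setuptools_scm[toml]>=4.1.2` out of the shell, which is what appears to have produced the stray `4.1.2` file. The requirement list below is just an example.
```python
# Sketch: install requirements via a temporary requirements file instead
# of passing specifiers through a shell command line.
import subprocess
import sys
import tempfile
from pathlib import Path

requirements = ["setuptools >= 42", "setuptools_scm[toml]>=4.1.2", "wheel"]

with tempfile.TemporaryDirectory() as d:
    reqfile = Path(d) / "requirements.txt"
    reqfile.write_text("\n".join(requirements) + "\n")
    subprocess.run([sys.executable, "-m", "pip", "install", "-r", str(reqfile)], check=True)
```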
| 2020-08-14T00:51:18 |
|
pypa/cibuildwheel | 447 | pypa__cibuildwheel-447 | [
"443"
] | 97dd4ee1c06f3cf271a26c61a59305f7598a0fef | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -11,7 +11,7 @@
from .environment import ParsedEnvironment
from .util import (BuildOptions, BuildSelector, NonPlatformWheelError, download,
get_build_verbosity_extra_flags, get_pip_script,
- prepare_command)
+ prepare_command, install_certifi_script)
def call(args: Union[str, Sequence[Union[str, PathLike]]], env: Optional[Dict[str, str]] = None, cwd: Optional[str] = None, shell: bool = False) -> int:
@@ -72,6 +72,9 @@ def install_cpython(version: str, url: str) -> Path:
# if this version of python isn't installed, get it from python.org and install
python_package_identifier = f'org.python.Python.PythonFramework-{version}'
+ python_executable = 'python3' if version[0] == '3' else 'python'
+ installation_bin_path = Path(f'/Library/Frameworks/Python.framework/Versions/{version}/bin')
+
if python_package_identifier not in installed_system_packages:
# download the pkg
download(url, Path('/tmp/Python.pkg'))
@@ -83,8 +86,8 @@ def install_cpython(version: str, url: str) -> Path:
download(open_ssl_patch_url, Path('/tmp/python-patch.tar.gz'))
call(['sudo', 'tar', '-C', f'/Library/Frameworks/Python.framework/Versions/{version}/', '-xmf', '/tmp/python-patch.tar.gz'])
- installation_bin_path = Path(f'/Library/Frameworks/Python.framework/Versions/{version}/bin')
- python_executable = 'python3' if version[0] == '3' else 'python'
+ call(["sudo", str(installation_bin_path/python_executable), str(install_certifi_script)])
+
pip_executable = 'pip3' if version[0] == '3' else 'pip'
make_symlinks(installation_bin_path, python_executable, pip_executable)
diff --git a/cibuildwheel/resources/install_certifi.py b/cibuildwheel/resources/install_certifi.py
new file mode 100644
--- /dev/null
+++ b/cibuildwheel/resources/install_certifi.py
@@ -0,0 +1,52 @@
+# Based on: https://github.com/python/cpython/blob/master/Mac/BuildScript/resources/install_certificates.command
+
+# install_certifi.py
+#
+# sample script to install or update a set of default Root Certificates
+# for the ssl module. Uses the certificates provided by the certifi package:
+# https://pypi.org/project/certifi/
+
+import os
+import os.path
+import ssl
+import stat
+import subprocess
+import sys
+
+STAT_0o775 = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IXOTH)
+
+if sys.version_info[0] == 2:
+ FileNotFoundError = OSError
+
+
+def main():
+ openssl_dir, openssl_cafile = os.path.split(
+ ssl.get_default_verify_paths().openssl_cafile)
+ print(" -- pip install --upgrade certifi")
+ subprocess.check_call([sys.executable,
+ "-E", "-s", "-m", "pip", "install", "--upgrade", "certifi"])
+
+ import certifi
+ # change working directory to the default SSL directory
+ if sys.version_info[0:2] == (3, 5):
+ os.makedirs(openssl_dir, exist_ok=True, mode=0o775)
+ os.chdir(openssl_dir)
+ relpath_to_certifi_cafile = os.path.relpath(certifi.where())
+
+ print(" -- removing any existing file or link")
+ try:
+ os.remove(openssl_cafile)
+ except FileNotFoundError:
+ pass
+ print(" -- creating symlink to certifi certificate bundle")
+ os.symlink(relpath_to_certifi_cafile, openssl_cafile)
+
+ print(" -- setting permissions")
+ os.chmod(openssl_cafile, STAT_0o775)
+ print(" -- update complete")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -127,6 +127,7 @@ class BuildOptions(NamedTuple):
resources_dir = Path(__file__).resolve().parent / 'resources'
get_pip_script = resources_dir / 'get-pip.py'
+install_certifi_script = resources_dir / "install_certifi.py"
class NonPlatformWheelError(Exception):
| diff --git a/test/test_ssl.py b/test/test_ssl.py
--- a/test/test_ssl.py
+++ b/test/test_ssl.py
@@ -13,11 +13,10 @@
else:
from urllib.request import urlopen
- if sys.version_info[0:2] == (3, 3):
- data = urlopen("https://www.nist.gov")
- else:
- context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
- data = urlopen("https://www.nist.gov", context=context)
+ context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
+ data = urlopen("https://www.nist.gov", context=context)
+ data = urlopen("https://raw.githubusercontent.com/joerick/cibuildwheel/master/CI.md", context=context)
+ data = urlopen("https://raw.githubusercontent.com/joerick/cibuildwheel/master/CI.md")
''')
)
| SSL issues with OSX on Azure
A contributor to my project recently enabled a test that attempts to download a resource using HTTPS. It seems that running these tests on Azure fails for the OSX environments we have with errors like:
```
(<urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:841)>)
```
Which leads to issues on github like https://github.com/scikit-learn/scikit-learn/issues/10201#issuecomment-365734582. It seems the "proper" solution is to properly install the SSL certificates that come with the python installation: https://bugs.python.org/issue29480 which can apparently be fixed with:
```
sudo /Applications/Python\ 3.6/Install\ Certificates.command
```
I'm not really familiar with how cibuildwheel is using the base image from the build environment, but is this something I should be able to control in my Azure environments? Is this something that can and should be fixed in cibuildwheel? Thanks for any help.
I see there are issues already related to SSL stuff, but they all seem a little different than this or say that this should already be fixed. I must be missing something.
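A minimal way to reproduce the kind of check that fails here, independent of the project: open any HTTPS URL with the interpreter's default certificate store. On an affected macOS Python this raises `CERTIFICATE_VERIFY_FAILED`; the URL is only an example.
```python
# Sketch: exercise HTTPS verification with the default certificate store.
import urllib.request

with urllib.request.urlopen("https://www.python.org") as response:
    print(response.status)
```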
| > A contributor to my project recently enabled a test that attempts to download a resource using HTTPS. It seems that running these tests on Azure fails for the OSX environments we have with errors like:
Could you provide information about cibuildwheel version, python version (running test)? Which macos version?
Yes, sorry. Of course.
Here is the full azure config if necessary: https://github.com/vispy/vispy/blob/master/azure-pipelines.yml
The two OSX systems are both OSX 10.14, but one uses the default build target while the other does `MACOSX_DEPLOYMENT_TARGET="10.14"` before calling cibuildwheel.
Our currently build skips are:
```
CIBW_SKIP: "cp27-* cp34-* cp35-* cp39-* pp27-* pp36-*"
```
So python 3.6, 3.7, and 3.8 should be built.
Edit: And this should be using the latest cibuildwheel release (1.6.1).
As project is public. Could You provide link to failing build?
Let me know if you can see these: https://dev.azure.com/vispy/vispy/_build/results?buildId=574&view=results
*Ping @mayeut, who's been patching SSL in older Python versions for cibuildwheel before.*
@djhoese I see it.
Could you ignore `cp36` and check if everything passes?
It looks like an outdated certificate store.
Maybe we need to add the call below to the Python install on macOS
```
sudo /Applications/Python\ 3.6/Install\ Certificates.command
```
Do we know any MacPython expert who could tell us for which version of Python this should be called?
I could, but I had our internal pytest test get skipped for python 3.6 and it then failed for python 3.7. My understanding based on the python issue mentioned above is that this is the intended way for Python 3+ to work now with regards to certificate installation on OSX.
Here's what I get for a list of CPython versions:
```
find /Applications/Python* -name 'Inst*'
/Applications/Python 2.7/Install Certificates.command
/Applications/Python 3.6/Install Certificates.command
/Applications/Python 3.7/Install Certificates.command
/Applications/Python 3.8/Install Certificates.command
/Applications/Python 3.9/Install Certificates.command
```
Only Python 3.5 doesn't come with that script but probably should (since it reports location of cert file like the others):
```
Matt$ /Library/Frameworks/Python.framework/Versions/3.5/bin/python3
Python 3.5.4 (v3.5.4:3f56838976, Aug 7 2017, 12:56:33)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/3.5/etc/openssl/cert.pem'
>>> ^D
Matt$ /Library/Frameworks/Python.framework/Versions/3.6/bin/python3
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 03:03:55)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/3.6/etc/openssl/cert.pem'
>>> ^D
Matt$ /Library/Frameworks/Python.framework/Versions/3.7/bin/python3
Python 3.7.5 (v3.7.5:5c02a39a0b, Oct 14 2019, 19:07:34)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/3.7/etc/openssl/cert.pem'
>>> ^D
Matt$ /Library/Frameworks/Python.framework/Versions/3.8/bin/python3
Python 3.8.2 (v3.8.2:7b3ab5921f, Feb 24 2020, 17:52:18)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/3.8/etc/openssl/cert.pem'
>>> ^D
Matt$ /Library/Frameworks/Python.framework/Versions/3.9/bin/python3
Python 3.9.0 (v3.9.0:9cf6752276, Oct 5 2020, 11:29:23)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/3.9/etc/openssl/cert.pem'
>>> ^D
Matt$ /Library/Frameworks/Python.framework/Versions/2.7/bin/python
Python 2.7.17 (v2.7.17:c2f86d86e6, Oct 19 2019, 16:07:15)
[GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import ssl
>>> ssl.get_default_verify_paths().openssl_cafile
'/Library/Frameworks/Python.framework/Versions/2.7/etc/openssl/cert.pem'
>>> ^D
```
The script does the same thing for all versions: it installs/upgrades `certifi` and replaces `ssl.get_default_verify_paths().openssl_cafile` with a symlink to `certifi.where()`. | 2020-10-08T21:48:31 |
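To make the relationship described above concrete, this read-only sketch prints the CA bundle path the `ssl` module expects alongside the bundle `certifi` ships; the macOS fix simply links the former to the latter. It assumes `certifi` is installed and changes nothing on disk.
```python
# Sketch: show where ssl looks for the CA bundle and where certifi keeps its own.
import ssl

import certifi

print("ssl expects CA bundle at:", ssl.get_default_verify_paths().openssl_cafile)
print("certifi provides a bundle at:", certifi.where())
```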
pypa/cibuildwheel | 455 | pypa__cibuildwheel-455 | [
"452"
] | 1e164977767dcd3f532555fb7c30f0d1197a1295 | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -1,6 +1,8 @@
import os
import textwrap
+import certifi
import urllib.request
+import ssl
from fnmatch import fnmatch
from pathlib import Path
from time import sleep
@@ -66,10 +68,14 @@ def download(url: str, dest: Path) -> None:
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
+ # we've had issues when relying on the host OS' CA certificates on Windows,
+ # so we use certifi (this sounds odd but requests also does this by default)
+ cafile = os.environ.get('SSL_CERT_FILE', certifi.where())
+ context = ssl.create_default_context(cafile=cafile)
repeat_num = 3
for i in range(repeat_num):
try:
- response = urllib.request.urlopen(url)
+ response = urllib.request.urlopen(url, context=context)
except Exception:
if i == repeat_num - 1:
raise
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
setup(
name='cibuildwheel',
version='1.6.3',
- install_requires=['bashlex!=0.13', 'toml'],
+ install_requires=['bashlex!=0.13', 'toml', 'certifi'],
description="Build Python wheels on CI with minimal configuration.",
long_description=long_description,
long_description_content_type='text/markdown',
| diff --git a/unit_test/download_test.py b/unit_test/download_test.py
new file mode 100644
--- /dev/null
+++ b/unit_test/download_test.py
@@ -0,0 +1,31 @@
+import certifi
+import pytest
+import ssl
+
+from cibuildwheel.util import download
+
+
+DOWNLOAD_URL = 'https://raw.githubusercontent.com/joerick/cibuildwheel/v1.6.3/requirements-dev.txt'
+
+
+def test_download(monkeypatch, tmp_path):
+ monkeypatch.delenv('SSL_CERT_FILE', raising=False)
+ dest = tmp_path / 'file.txt'
+ download(DOWNLOAD_URL, dest)
+ assert len(dest.read_bytes()) == 134
+
+
+def test_download_good_ssl_cert_file(monkeypatch, tmp_path):
+ monkeypatch.setenv('SSL_CERT_FILE', certifi.where())
+ dest = tmp_path / 'file.txt'
+ download(DOWNLOAD_URL, dest)
+ assert len(dest.read_bytes()) == 134
+
+
+def test_download_bad_ssl_cert_file(monkeypatch, tmp_path):
+ bad_cafile = tmp_path / 'ca.pem'
+ bad_cafile.write_text('bad certificates')
+ monkeypatch.setenv('SSL_CERT_FILE', str(bad_cafile))
+ dest = tmp_path / 'file.txt'
+ with pytest.raises(ssl.SSLError):
+ download(DOWNLOAD_URL, dest)
| Windows: urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed
Hello,
Can you, please, help me to debug https://travis-ci.org/github/ets-labs/python-dependency-injector/jobs/739050288?
It occurs only on Windows. Error says: ``urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1108)>``. I'm unclear whose certificate has expired.
Best,
Roman
| Please bump cibuildwheel to the most recent version. It should be fixed in #447
Got it, will try. Thank you.
I'm currently seeing this as well on travis ci windows builds only, already running the latest version (1.6.3). I did a release yesterday and things worked fine, so something's changed in the last day (likely the certs on the windows box).
I've worked around this by installing `certifi` and configuring the build to use certifi's certs, see here:
https://github.com/jcrist/quickle/pull/58/files#diff-6ac3f79fc25d95cd1e3d51da53a4b21b939437392578a35ae8cd6d5366ca5485R31
@jcrist , cool, thanks for sharing.
Oh. So it is an outdated cert on this python.
Could you try to bump python version in travis file from 3.8.0 to 3.8.2
```
- choco install python --version 3.8.2
```
@jcrist @rmk135?
I'm already using 3.8.6 (in that PR), which didn't fix things. Installing certifi and forcing the build to use certifi certs did fix things.
strange
@jcrist , did you face such issue on windows ``The command "python3 -m pip install certifi cibuildwheel==1.6.3" failed and exited with 127 during .``?
The reason looks strange:
```
Your build has been stopped.
/c/Users/travis/.travis/functions: line 109: python3: command not found
```
https://travis-ci.com/github/ets-labs/python-dependency-injector/jobs/411227719
I have it after tried your solution.
PS: I have recently migrated from `travis-ci.org` to `travis-ci.com`.
Fixed `python3: command not found` by adding a line from README into `before_install`:
```
- ln -s /c/Python38/python.exe /c/Python38/python3.exe
```
Seems that wasn't needed when I initially configured windows builds.
Thanks everybody for the help, and special thanks to this lib's creators and contributors for the great tool.
I just spotted this in an unrelated PR, this time on Appveyor:
https://ci.appveyor.com/project/joerick/cibuildwheel/builds/36032538/job/n4duoutlja3nbqit
It's happening in the host process, not in a version of Python installed by cibuildwheel. But still on that initial nuget download.
Perhaps cibuildwheel should require `certifi` as a dependency and explicitly use certifi's certs in e.g. `download` instead of relying on the system certs? `requests` does this, and it seems to work well: https://requests.readthedocs.io/en/master/user/advanced/#ca-certificates.
It looks rather like outdated Windows images from the CI provider. Is there a possibility to change the certificate source after the scripts run? | 2020-10-29T22:19:39 |
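For reference, a small sketch of the direction the fix above takes (and of the workaround @jcrist describes): build an SSL context from `certifi`'s CA bundle, honouring `SSL_CERT_FILE` if set, rather than relying on the host OS certificate store. The URL is only an example and `certifi` must be installed.
```python
# Sketch: download over HTTPS using certifi's CA bundle instead of the OS store.
import os
import ssl
import urllib.request

import certifi

cafile = os.environ.get("SSL_CERT_FILE", certifi.where())
context = ssl.create_default_context(cafile=cafile)
with urllib.request.urlopen("https://pypi.org", context=context) as response:
    print(response.status)
```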
pypa/cibuildwheel | 553 | pypa__cibuildwheel-553 | [
"551"
] | eb700a164c69b17efa2698f14e1592ae5a615b9d | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -80,7 +80,7 @@ def main() -> None:
on this machine. Set this option to build an architecture
via emulation, for example, using binfmt_misc and QEMU.
Default: auto.
- Choices: auto, native, all, {}
+ Choices: auto, auto64, auto32, native, all, {}
'''.format(", ".join(a.name for a in Architecture)))
parser.add_argument('--output-dir',
default=os.environ.get('CIBW_OUTPUT_DIR', 'wheelhouse'),
diff --git a/cibuildwheel/architecture.py b/cibuildwheel/architecture.py
--- a/cibuildwheel/architecture.py
+++ b/cibuildwheel/architecture.py
@@ -4,7 +4,7 @@
from enum import Enum
from typing import Set
-from .typing import PlatformName, assert_never
+from .typing import Literal, PlatformName, assert_never
PRETTY_NAMES = {'linux': 'Linux', 'macos': 'macOS', 'windows': 'Windows'}
@@ -44,6 +44,10 @@ def parse_config(config: str, platform: PlatformName) -> 'Set[Architecture]':
result.add(Architecture(platform_module.machine()))
elif arch_str == 'all':
result |= Architecture.all_archs(platform=platform)
+ elif arch_str == 'auto64':
+ result |= Architecture.bitness_archs(platform=platform, bitness="64")
+ elif arch_str == 'auto32':
+ result |= Architecture.bitness_archs(platform=platform, bitness="32")
else:
result.add(Architecture(arch_str))
return result
@@ -77,6 +81,18 @@ def all_archs(platform: PlatformName) -> 'Set[Architecture]':
else:
assert_never(platform)
+ @staticmethod
+ def bitness_archs(platform: PlatformName, bitness: Literal['64', '32']) -> 'Set[Architecture]':
+ archs_32 = {Architecture.i686, Architecture.x86}
+ auto_archs = Architecture.auto_archs(platform)
+
+ if bitness == '64':
+ return auto_archs - archs_32
+ elif bitness == '32':
+ return auto_archs & archs_32
+ else:
+ assert_never(bitness)
+
def allowed_architectures_check(
platform: PlatformName,
| diff --git a/unit_test/main_tests/main_platform_test.py b/unit_test/main_tests/main_platform_test.py
--- a/unit_test/main_tests/main_platform_test.py
+++ b/unit_test/main_tests/main_platform_test.py
@@ -131,6 +131,39 @@ def test_archs_platform_native(platform, intercepted_build_args, monkeypatch):
assert build_options.architectures == {Architecture.x86_64}
+def test_archs_platform_auto64(platform, intercepted_build_args, monkeypatch):
+ monkeypatch.setenv('CIBW_ARCHS', 'auto64')
+
+ main()
+ build_options = intercepted_build_args.args[0]
+
+ if platform == 'linux':
+ assert build_options.architectures == {Architecture.x86_64}
+ elif platform == 'windows':
+ assert build_options.architectures == {Architecture.AMD64}
+ elif platform == 'macos':
+ assert build_options.architectures == {Architecture.x86_64}
+
+
+def test_archs_platform_auto32(platform, intercepted_build_args, monkeypatch):
+ monkeypatch.setenv('CIBW_ARCHS', 'auto32')
+
+ if platform == 'macos':
+ with pytest.raises(SystemExit) as exit:
+ main()
+ assert exit.value.args == (4,)
+
+ else:
+ main()
+
+ build_options = intercepted_build_args.args[0]
+
+ if platform == 'linux':
+ assert build_options.architectures == {Architecture.i686}
+ elif platform == 'windows':
+ assert build_options.architectures == {Architecture.x86}
+
+
def test_archs_platform_all(platform, intercepted_build_args, monkeypatch):
monkeypatch.setenv('CIBW_ARCHS', 'all')
| Idea: Arch 32/64 shortcut
When looking at the build matrix here:
https://github.com/deepcharles/ruptures/blob/404957563c0391f073200899b9b041caa9277cc6/.github/workflows/upload-to-pypi.yml#L40-L80
It seems we could have shortcut values for `ARCHS`, "32" and "64", that would select 32 bit and 64 bit native architectures. So on 64 bit Windows or Linux, you could select 32 or 64. On macOS, you could only select 64, 32 would be empty (so an error after #545 unless `--allow-empty` was passed) or we could just make it an error. Same thing for 64 on 32bit OS's. On alternate archs, these remain the "native" on that arch 32/64 values.
I believe @mayeut wanted something like this too? Don't have the issue reference handy for it.
This would take a lot of pressure off the build selector. ARCHS, like BUILD/SKIP, already helps define the build process.
<details><summary>Old version</summary>
```yaml
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
bitness: [32, 64]
python: [36, 37, 38, 39]
include:
# Run 32 and 64 bits version in parallel for Linux and Windows
- os: windows-latest
bitness: 64
platform_id: win_amd64
- os: windows-latest
bitness: 32
platform_id: win32
- os: ubuntu-latest
bitness: 64
platform_id: manylinux_x86_64
- os: ubuntu-latest
bitness: 32
platform_id: manylinux_i686
- os: macos-latest
bitness: 64
platform_id: macosx_x86_64
exclude:
- os: macos-latest
bitness: 32
- os: windows-latest
python: 39
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-python@v2
name: Install Python
with:
python-version: '3.8'
- name: Install cibuildwheel
run: |
python -m pip install --upgrade pip
python -m pip install cibuildwheel
- name: Build wheels
env:
CIBW_BUILD: cp${{ matrix.python }}-${{ matrix.platform_id }}
run: python -m cibuildwheel --output-dir wheelhouse
```
</details>
<details><summary>New version</summary>
```yaml
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
bitness: [32, 64]
python: [36, 37, 38, 39]
exclude:
- os: macos-latest
bitness: 32
- os: windows-latest
python: 39
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build wheels
env:
CIBW_BUILD: cp${{ matrix.python }}-*
run: pipx run cibuildwheel==1.80 --output-dir wheelhouse --archs ${{ matrix.bitness }}
```
</details>
Thoughts? Hopefully @deepcharles doesn't mind me using his yaml as an example. :) Also used pipx for the "new" version, because why not, it's preinstalled. :)
| I didn't know you could do a partial exclude like that, by the way, nice. :)
all credit goes to @oboulant on that one
Maybe `native64`/`native32`, then? Just `32` or `64` seems a bit vague.
But then we have the weird issue that 32 isn't considered `native` on a 64-bit version of Linux, for example?
I don't think `--archs 32` or `CIBW_ARCHS: "32"` is at all vague when you are reading a config. Though I thought about suggesting `native32` or `native_32`, something like that. I guess we could do `auto32`, to indicate that it's the same as auto but only the 32 bit part, but I think just `32` is fine. Though `auto32`/`auto64` doesn't look that bad...
`32bit`/`64bit`, possibly?
`32`/`64` is my favorite, with `auto32`/`auto64` a very close second. | 2021-01-21T17:57:16 |
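A toy sketch of the bitness shortcut discussed above (not the real `Architecture` enum): starting from a platform's `auto` architectures, `auto64` and `auto32` are just set difference and intersection against the known 32-bit names.
```python
# Toy sketch: derive auto64/auto32 from a platform's "auto" architectures.
auto_archs = {"x86_64", "i686"}  # e.g. what "auto" means on 64-bit Linux
archs_32 = {"i686", "x86"}       # the 32-bit architecture names

print("auto64:", auto_archs - archs_32)  # {'x86_64'}
print("auto32:", auto_archs & archs_32)  # {'i686'}
```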
pypa/cibuildwheel | 570 | pypa__cibuildwheel-570 | [
"566"
] | 70d9e1d9699d24c99926ab6586869a92452991a7 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -12,6 +12,7 @@
import cibuildwheel
import cibuildwheel.linux
import cibuildwheel.macos
+import cibuildwheel.util
import cibuildwheel.windows
from cibuildwheel.architecture import Architecture, allowed_architectures_check
from cibuildwheel.environment import EnvironmentParseError, parse_environment
@@ -281,14 +282,15 @@ def main() -> None:
if not output_dir.exists():
output_dir.mkdir(parents=True)
- if platform == 'linux':
- cibuildwheel.linux.build(build_options)
- elif platform == 'windows':
- cibuildwheel.windows.build(build_options)
- elif platform == 'macos':
- cibuildwheel.macos.build(build_options)
- else:
- assert_never(platform)
+ with cibuildwheel.util.print_new_wheels("{n} wheels produced in {m:.0f} minutes:", output_dir):
+ if platform == 'linux':
+ cibuildwheel.linux.build(build_options)
+ elif platform == 'windows':
+ cibuildwheel.windows.build(build_options)
+ elif platform == 'macos':
+ cibuildwheel.macos.build(build_options)
+ else:
+ assert_never(platform)
def detect_obsolete_options() -> None:
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -1,14 +1,16 @@
+import contextlib
import fnmatch
import itertools
import os
import re
import ssl
import textwrap
+import time
import urllib.request
from enum import Enum
from pathlib import Path
from time import sleep
-from typing import Dict, List, NamedTuple, Optional, Set
+from typing import Dict, Iterator, List, NamedTuple, Optional, Set
import bracex
import certifi
@@ -251,3 +253,23 @@ def unwrap(text: str) -> str:
text = text.strip()
# remove consecutive whitespace
return re.sub(r'\s+', ' ', text)
+
+
[email protected]
+def print_new_wheels(msg: str, output_dir: Path) -> Iterator[None]:
+ '''
+ Prints the new items in a directory upon exiting. The message to display
+ can include {n} for number of wheels, {s} for total number of seconds,
+ and/or {m} for total number of minutes. Does not print anything if this
+ exits via exception.
+ '''
+
+ start_time = time.time()
+ existing_contents = set(output_dir.iterdir())
+ yield
+ final_contents = set(output_dir.iterdir())
+ new_contents = final_contents - existing_contents
+ n = len(new_contents)
+ s = time.time() - start_time
+ m = s / 60
+ print(msg.format(n=n, s=s, m=m), *sorted(f" {f.name}" for f in new_contents), sep="\n")
| diff --git a/unit_test/main_tests/conftest.py b/unit_test/main_tests/conftest.py
--- a/unit_test/main_tests/conftest.py
+++ b/unit_test/main_tests/conftest.py
@@ -1,3 +1,4 @@
+import contextlib
import platform as platform_module
import subprocess
import sys
@@ -58,6 +59,15 @@ def mock_path_exists(path):
return args
[email protected](autouse=True)
+def disable_print_wheels(monkeypatch):
+ @contextlib.contextmanager
+ def empty_cm(*args, **kwargs):
+ yield
+
+ monkeypatch.setattr(util, 'print_new_wheels', empty_cm)
+
+
@pytest.fixture
def allow_empty(request, monkeypatch, fake_package_dir):
monkeypatch.setattr(sys, 'argv', fake_package_dir + ['--allow-empty'])
diff --git a/unit_test/test_wheel_print.py b/unit_test/test_wheel_print.py
new file mode 100644
--- /dev/null
+++ b/unit_test/test_wheel_print.py
@@ -0,0 +1,35 @@
+import pytest
+
+from cibuildwheel.util import print_new_wheels
+
+
+def test_printout_wheels(tmp_path, capsys):
+ tmp_path.joinpath("example.0").touch()
+ with print_new_wheels("TEST_MSG: {n}", tmp_path):
+ tmp_path.joinpath("example.1").touch()
+ tmp_path.joinpath("example.2").touch()
+
+ captured = capsys.readouterr()
+ assert captured.err == ""
+
+ assert "example.0" not in captured.out
+ assert "example.1\n" in captured.out
+ assert "example.2\n" in captured.out
+ assert "TEST_MSG:" in captured.out
+ assert "TEST_MSG: 2\n" in captured.out
+
+
+def test_no_printout_on_error(tmp_path, capsys):
+ tmp_path.joinpath("example.0").touch()
+ with pytest.raises(RuntimeError):
+ with print_new_wheels("TEST_MSG: {n}", tmp_path):
+ tmp_path.joinpath("example.1").touch()
+ raise RuntimeError()
+
+ captured = capsys.readouterr()
+ assert captured.err == ""
+
+ assert "example.0" not in captured.out
+ assert "example.1" not in captured.out
+ assert "example.2" not in captured.out
+ assert "TEST_MSG:" not in captured.out
| Tiny feat: Add printout of produced wheels at end of logs
One thing I have in common for all my uses of cibuildwheel is:
```yaml
- uses: joerick/[email protected]
- name: Show files
run: ls -lh wheelhouse
shell: bash
```
It might be a nice to list the built wheel names at the end of the logs, so it's easy to tell what's been produced. (If actions/upload-artifact printed the name of the files it was uploading, this wouldn't be so important, but it doesn't)
| Yeah, good idea! | 2021-02-01T02:34:16 |
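A self-contained sketch of the idea in the patch above: a context manager records the output directory's contents on entry and reports whatever appeared by the time it exits. The names and the temporary directory here are illustrative, not cibuildwheel's API.
```python
# Sketch: report files that appear in a directory while a block runs.
import contextlib
import tempfile
from pathlib import Path


@contextlib.contextmanager
def report_new_files(output_dir: Path):
    before = set(output_dir.iterdir())
    yield
    new = set(output_dir.iterdir()) - before
    print(f"{len(new)} new file(s):", *sorted(f.name for f in new))


with tempfile.TemporaryDirectory() as tmp:
    out = Path(tmp)
    with report_new_files(out):
        (out / "example-0.1.0-py3-none-any.whl").touch()
```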
pypa/cibuildwheel | 590 | pypa__cibuildwheel-590 | [
"563"
] | 08edf62e7941f2400935a60aa3c77d72847b7f2a | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -288,6 +288,7 @@ def build(options: BuildOptions) -> None:
if options.before_all:
log.step('Running before_all...')
env = options.environment.as_dictionary(prev_environment=os.environ)
+ env.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.9')
before_all_prepared = prepare_command(options.before_all, project='.', package=options.package_dir)
call([before_all_prepared], shell=True, env=env)
| set MACOSX_DEPLOYMENT_TARGET for all stages?
I recently adopted cibuildwheel (it is wonderful, thank you!), and made a broken release on older macs. The mistake I made was failing to set MACOSX_DEPLOYMENT_TARGET for the CIBW_BEFORE_ALL stage. I was surprised to see that CIBW has the feature to set MACOSX_DEPLOYMENT_TARGET, but doesn't do it for all stages.
#306 sets MACOSX_DEPLOYMENT_TARGET for python commands, but I was building a library dependency in CIBW_BEFORE_ALL.
My wheels are roughly:
- CIBW_BEFORE_ALL compiles and installs a library
- build my wheel that links the library
- repair (auditwheel/delocate/delvewheel) bundles the library in the wheel
So my question is: should I have built the library in a different stage, or does it make sense to set the default MACOSX_DEPLOYMENT_TARGET for all commands, not just the python environment?
Fixing it was easy enough (set MACOSX_DEPLOYMENT_TARGET in global env)
| Ah, I see. Yes the BEFORE_ALL commands on macOS and Windows run outside of a cibuildwheel Python environment, so we haven't set our environment variables yet.
Your workflow sounds sensible to me. (The only snag I can think of will be that once `universal2` is supported (#484), you'll have to build your shared lib as universal2, also. But I think delocate should be able to handle it)
I'm not sure that we've built in the assumption that BEFORE_ALL is for _building_ dependencies exactly, but it does seem pretty harmless to add `env.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.9')` there. The other question we could consider is 'should we run the macos BEFORE_ALL inside one of our Python environments, rather than in the system environment?' We do that on Linux, but that was only because the system Python on `manylinux` was 2.6 or something ancient.
But it's probably easier to do the simple thing - adding `env.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.9')` before calling BEFORE_ALL.
`env.setdefault('MACOSX_DEPLOYMENT_TARGET', '10.9')` would still provide strange behavior if someone set `CIBW_ENVIRONMENT: MACOSX_DEPLOYMENT_TARGET=10.14` (say, for C++17 support)? Seems like the correct thing would be to match Linux on Windows/macOS and make the environment affect all steps, since it already does that on Linux? We need to be careful not to override `MACOSX_DEPLOYMENT_TARGET` - most projects _should_ be setting it, really, for constancy and control, but many don't. | 2021-02-11T04:39:30 |
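The one-line fix above boils down to giving the `before_all` environment a default without clobbering anything the user set via `CIBW_ENVIRONMENT`. A minimal sketch, using a plain copy of `os.environ` as a stand-in for the prepared environment:
```python
# Sketch: provide a default MACOSX_DEPLOYMENT_TARGET only if none was set.
import os

env = os.environ.copy()  # stand-in for the environment passed to before_all
env.setdefault("MACOSX_DEPLOYMENT_TARGET", "10.9")
print(env["MACOSX_DEPLOYMENT_TARGET"])
```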
|
pypa/cibuildwheel | 661 | pypa__cibuildwheel-661 | [
"646"
] | d2529cfaf2c78d5fc2f157cea04bdb0bf58cb2c1 | diff --git a/bin/update_dependencies.py b/bin/update_dependencies.py
--- a/bin/update_dependencies.py
+++ b/bin/update_dependencies.py
@@ -1,141 +1,43 @@
#!/usr/bin/env python3
-from __future__ import annotations
+# This file supports 3.6+
-import configparser
import os
import shutil
import subprocess
import sys
-from typing import NamedTuple
+from pathlib import Path
-import requests
+DIR = Path(__file__).parent.resolve()
+RESOURCES = DIR.parent / "cibuildwheel/resources"
-os.chdir(os.path.dirname(__file__))
-os.chdir("..")
+python_version = "".join(str(v) for v in sys.version_info[:2])
+
+env = os.environ.copy()
# CUSTOM_COMPILE_COMMAND is a pip-compile option that tells users how to
# regenerate the constraints files
-os.environ["CUSTOM_COMPILE_COMMAND"] = "bin/update_dependencies.py"
-
-PYTHON_VERSIONS = ["36", "37", "38", "39"]
-
-if "--no-docker" in sys.argv:
- for python_version in PYTHON_VERSIONS:
- subprocess.run(
- [
- f"./env{python_version}/bin/pip-compile",
- "--allow-unsafe",
- "--upgrade",
- "cibuildwheel/resources/constraints.in",
- f"--output-file=cibuildwheel/resources/constraints-python{python_version}.txt",
- ],
- check=True,
- )
-else:
- image_runner = "quay.io/pypa/manylinux2010_x86_64:latest"
- subprocess.run(["docker", "pull", image_runner], check=True)
- for python_version in PYTHON_VERSIONS:
- abi_flags = "" if int(python_version) >= 38 else "m"
- python_path = f"/opt/python/cp{python_version}-cp{python_version}{abi_flags}/bin/"
- command = (
- f"{python_path}pip install pip-tools && "
- f"{python_path}pip-compile --allow-unsafe --upgrade "
- "cibuildwheel/resources/constraints.in "
- f"--output-file cibuildwheel/resources/constraints-python{python_version}.txt"
- )
- subprocess.run(
- [
- "docker",
- "run",
- "--rm",
- "--env=CUSTOM_COMPILE_COMMAND",
- f"--volume={os.getcwd()}:/volume",
- "--workdir=/volume",
- image_runner,
- "bash",
- "-c",
- command,
- ],
- check=True,
- )
-
-# default constraints.txt
-shutil.copyfile(
- f"cibuildwheel/resources/constraints-python{PYTHON_VERSIONS[-1]}.txt",
- "cibuildwheel/resources/constraints.txt",
+env["CUSTOM_COMPILE_COMMAND"] = "bin/update_dependencies.py"
+
+if python_version == "36":
+ # Bug with click and Python 3.6
+ env["LC_ALL"] = "C.UTF-8"
+ env["LANG"] = "C.UTF-8"
+
+subprocess.run(
+ [
+ "pip-compile",
+ "--allow-unsafe",
+ "--upgrade",
+ f"{RESOURCES}/constraints.in",
+ f"--output-file={RESOURCES}/constraints-python{python_version}.txt",
+ ],
+ check=True,
+ env=env,
)
-
-class Image(NamedTuple):
- manylinux_version: str
- platform: str
- image_name: str
- tag: str | None
-
-
-images = [
- Image("manylinux1", "x86_64", "quay.io/pypa/manylinux1_x86_64", None),
- Image("manylinux1", "i686", "quay.io/pypa/manylinux1_i686", None),
- # 2010 images
- Image("manylinux2010", "x86_64", "quay.io/pypa/manylinux2010_x86_64", None),
- Image("manylinux2010", "i686", "quay.io/pypa/manylinux2010_i686", None),
- Image("manylinux2010", "pypy_x86_64", "pypywheels/manylinux2010-pypy_x86_64", None),
- # 2014 images
- Image("manylinux2014", "x86_64", "quay.io/pypa/manylinux2014_x86_64", None),
- Image("manylinux2014", "i686", "quay.io/pypa/manylinux2014_i686", None),
- Image("manylinux2014", "aarch64", "quay.io/pypa/manylinux2014_aarch64", None),
- Image("manylinux2014", "ppc64le", "quay.io/pypa/manylinux2014_ppc64le", None),
- Image("manylinux2014", "s390x", "quay.io/pypa/manylinux2014_s390x", None),
- # 2_24 images
- Image("manylinux_2_24", "x86_64", "quay.io/pypa/manylinux_2_24_x86_64", None),
- Image("manylinux_2_24", "i686", "quay.io/pypa/manylinux_2_24_i686", None),
- Image("manylinux_2_24", "aarch64", "quay.io/pypa/manylinux_2_24_aarch64", None),
- Image("manylinux_2_24", "ppc64le", "quay.io/pypa/manylinux_2_24_ppc64le", None),
- Image("manylinux_2_24", "s390x", "quay.io/pypa/manylinux_2_24_s390x", None),
-]
-
-config = configparser.ConfigParser()
-
-for image in images:
- # get the tag name whose digest matches 'latest'
- if image.tag is not None:
- # image has been pinned, do not update
- tag_name = image.tag
- elif image.image_name.startswith("quay.io/"):
- _, _, repository_name = image.image_name.partition("/")
- response = requests.get(
- f"https://quay.io/api/v1/repository/{repository_name}?includeTags=true"
- )
- response.raise_for_status()
- repo_info = response.json()
- tags_dict = repo_info["tags"]
-
- latest_tag = tags_dict.pop("latest")
- # find the tag whose manifest matches 'latest'
- tag_name = next(
- name
- for (name, info) in tags_dict.items()
- if info["manifest_digest"] == latest_tag["manifest_digest"]
- )
- else:
- response = requests.get(f"https://hub.docker.com/v2/repositories/{image.image_name}/tags")
- response.raise_for_status()
- tags = response.json()["results"]
-
- latest_tag = next(tag for tag in tags if tag["name"] == "latest")
- # i don't know what it would mean to have multiple images per tag
- assert len(latest_tag["images"]) == 1
- digest = latest_tag["images"][0]["digest"]
-
- pinned_tag = next(
- tag for tag in tags if tag != latest_tag and tag["images"][0]["digest"] == digest
- )
- tag_name = pinned_tag["name"]
-
- if not config.has_section(image.platform):
- config[image.platform] = {}
-
- config[image.platform][image.manylinux_version] = f"{image.image_name}:{tag_name}"
-
-with open("cibuildwheel/resources/pinned_docker_images.cfg", "w") as f:
- config.write(f)
+# default constraints.txt
+if python_version == "39":
+ shutil.copyfile(
+ RESOURCES / f"constraints-python{python_version}.txt",
+ RESOURCES / "constraints.txt",
+ )
diff --git a/bin/update_docker.py b/bin/update_docker.py
new file mode 100755
--- /dev/null
+++ b/bin/update_docker.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+from __future__ import annotations
+
+import configparser
+from pathlib import Path
+from typing import NamedTuple
+
+import requests
+
+DIR = Path(__file__).parent.resolve()
+RESOURCES = DIR.parent / "cibuildwheel/resources"
+
+
+class Image(NamedTuple):
+ manylinux_version: str
+ platform: str
+ image_name: str
+ tag: str | None # Set this to pin the image
+
+
+images = [
+ Image("manylinux1", "x86_64", "quay.io/pypa/manylinux1_x86_64", None),
+ Image("manylinux1", "i686", "quay.io/pypa/manylinux1_i686", None),
+ # 2010 images
+ Image("manylinux2010", "x86_64", "quay.io/pypa/manylinux2010_x86_64", None),
+ Image("manylinux2010", "i686", "quay.io/pypa/manylinux2010_i686", None),
+ Image("manylinux2010", "pypy_x86_64", "pypywheels/manylinux2010-pypy_x86_64", None),
+ # 2014 images
+ Image("manylinux2014", "x86_64", "quay.io/pypa/manylinux2014_x86_64", None),
+ Image("manylinux2014", "i686", "quay.io/pypa/manylinux2014_i686", None),
+ Image("manylinux2014", "aarch64", "quay.io/pypa/manylinux2014_aarch64", None),
+ Image("manylinux2014", "ppc64le", "quay.io/pypa/manylinux2014_ppc64le", None),
+ Image("manylinux2014", "s390x", "quay.io/pypa/manylinux2014_s390x", None),
+ # 2_24 images
+ Image("manylinux_2_24", "x86_64", "quay.io/pypa/manylinux_2_24_x86_64", None),
+ Image("manylinux_2_24", "i686", "quay.io/pypa/manylinux_2_24_i686", None),
+ Image("manylinux_2_24", "aarch64", "quay.io/pypa/manylinux_2_24_aarch64", None),
+ Image("manylinux_2_24", "ppc64le", "quay.io/pypa/manylinux_2_24_ppc64le", None),
+ Image("manylinux_2_24", "s390x", "quay.io/pypa/manylinux_2_24_s390x", None),
+]
+
+config = configparser.ConfigParser()
+
+for image in images:
+ # get the tag name whose digest matches 'latest'
+ if image.tag is not None:
+ # image has been pinned, do not update
+ tag_name = image.tag
+ elif image.image_name.startswith("quay.io/"):
+ _, _, repository_name = image.image_name.partition("/")
+ response = requests.get(
+ f"https://quay.io/api/v1/repository/{repository_name}?includeTags=true"
+ )
+ response.raise_for_status()
+ repo_info = response.json()
+ tags_dict = repo_info["tags"]
+
+ latest_tag = tags_dict.pop("latest")
+ # find the tag whose manifest matches 'latest'
+ tag_name = next(
+ name
+ for (name, info) in tags_dict.items()
+ if info["manifest_digest"] == latest_tag["manifest_digest"]
+ )
+ else:
+ response = requests.get(f"https://hub.docker.com/v2/repositories/{image.image_name}/tags")
+ response.raise_for_status()
+ tags = response.json()["results"]
+
+ latest_tag = next(tag for tag in tags if tag["name"] == "latest")
+ # i don't know what it would mean to have multiple images per tag
+ assert len(latest_tag["images"]) == 1
+ digest = latest_tag["images"][0]["digest"]
+
+ pinned_tag = next(
+ tag for tag in tags if tag != latest_tag and tag["images"][0]["digest"] == digest
+ )
+ tag_name = pinned_tag["name"]
+
+ if not config.has_section(image.platform):
+ config[image.platform] = {}
+
+ config[image.platform][image.manylinux_version] = f"{image.image_name}:{tag_name}"
+
+with open(RESOURCES / "pinned_docker_images.cfg", "w") as f:
+ config.write(f)
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,93 @@
+import shutil
+import sys
+from pathlib import Path
+
+import nox
+
+nox.options.sessions = ["lint", "tests"]
+
+PYTHON_ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
+
+DIR = Path(__file__).parent.resolve()
+
+
[email protected]
+def tests(session):
+ """
+ Run the unit and regular tests.
+ """
+ unit_test_args = ["--run-docker"] if sys.platform.startswith("linux") else []
+ session.install("-e", ".[test]")
+ session.run("pytest", "unit_test", *unit_test_args)
+ session.run("pytest", "test", "-x", "--durations", "0", "--timeout=2400", "test")
+
+
[email protected]
+def lint(session):
+ """
+ Run the linter.
+ """
+ session.install("pre-commit")
+ session.run("pre-commit", "run", "--all-files")
+
+
[email protected](python=PYTHON_ALL_VERSIONS)
+def update_constraints(session):
+ """
+ Update the dependencies inplace.
+ """
+ session.install("requests", "pip-tools")
+ session.run("python", "bin/update_dependencies.py")
+
+
[email protected]
+def update_pins(session):
+ """
+ Update the python and docker pins version inplace.
+ """
+ session.install("-e", ".[dev]")
+ session.run("python", "bin/update_pythons.py", "--force")
+ session.run("python", "bin/update_docker.py")
+
+
[email protected]
+def update_proj(session):
+ """
+ Update the README inplace.
+ """
+ session.install("-e", ".[dev]")
+ session.run("./bin/projects.py", "docs/data/projects.yml", "--readme=README.md")
+
+
[email protected]
+def docs(session):
+ """
+ Build the docs.
+ """
+ session.install("-e", ".[docs]")
+
+ if session.posargs:
+ if "serve" in session.posargs:
+ session.run("mkdocs", "serve")
+ else:
+ print("Unrecognized args, use 'serve'")
+ else:
+ session.run("mkdocs", "build")
+
+
[email protected]
+def build(session):
+ """
+ Build an SDist and wheel.
+ """
+
+ build_p = DIR.joinpath("build")
+ if build_p.exists():
+ shutil.rmtree(build_p)
+
+ dist_p = DIR.joinpath("dist")
+ if dist_p.exists():
+ shutil.rmtree(dist_p)
+
+ session.install("build")
+ session.run("python", "-m", "build")
| Use Nox?
In bin, we have some code that likely could be run by nox, and at least one piece of code that could be replaced by Nox. This could simplify some of the scripts - mostly looking at the first half of update_dependencies.py. It could then be run either from a Nox docker container, using the Nox action, or locally if someone has installed all the required versions of Python, without having the current `--no-docker` duplication.
This could possibly be augmented over time to support running all our development actions, so a new developer could easily run these in a controlled environment without having to set up anything, and get a matching environment to the CI. I wasn't that fond of tox - too rigid with too many options and assumptions, but nox is not bad, much cleaner and more natural with nicer output. `pip` uses nox and tox (slowly transitioning to nox-only, I think). `packaging` and `packaging.python.org` use nox. Pretty much all the other `pypa` projects use tox.
Common dev actions would mostly be still available as a script (though the nox runner would install the requirements for you), though some actions (esp. ones like update_dependencies) could only run in Nox (mostly ones that require multiple Python versions).
Thoughts?
PS: personally, I run `pipx run nox` which means I don't have to install nox.
| For an example of something very similar for manylinux, see https://github.com/pypa/manylinux/pull/1055 - noxfile https://github.com/pypa/manylinux/pull/1055/files#diff-f7a16a65f061822bcc73b8296f4dc837353d379d8d9cc5307982cb6941442835
Another nox example: https://github.com/scikit-build/scikit-build-sample-projects/pull/12/files
nox looks like a great tool! I never used tox myself, it always had this 'heavy' feeling, probably because of the configuration files.
> It could be then run either from a Nox docker container, using the Nox action, or locally if someone has installed all the required versions of Python, without having the current --no-docker duplication.
If this is possible, that would be great.
| 2021-05-08T04:32:23 |
|
pypa/cibuildwheel | 666 | pypa__cibuildwheel-666 | [
"629"
] | a972ee9fc74c26a70d821b6f3ffaf9ae4d1e4338 | diff --git a/bin/update_pythons.py b/bin/update_pythons.py
--- a/bin/update_pythons.py
+++ b/bin/update_pythons.py
@@ -114,9 +114,7 @@ def __init__(self, arch_str: ArchStr):
self.releases = [
r
for r in releases
- if not r["pypy_version"].is_prerelease
- and not r["pypy_version"].is_devrelease
- and not r["pypy_version"] == Version("7.3.4")
+ if not r["pypy_version"].is_prerelease and not r["pypy_version"].is_devrelease
]
self.arch = arch_str
diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -15,7 +15,6 @@
get_build_verbosity_extra_flags,
prepare_command,
read_python_configs,
- resources_dir,
)
@@ -122,25 +121,6 @@ def build(options: BuildOptions) -> None:
log.build_start(config.identifier)
dependency_constraint_flags: List[PathOrStr] = []
- if config.identifier.startswith("pp36"):
- # Patch PyPy to make sure headers get installed into a venv
- patch_path = resources_dir / "pypy_venv.patch"
- patch_docker_path = PurePath("/pypy_venv.patch")
- docker.copy_into(patch_path, patch_docker_path)
- try:
- docker.call(
- [
- "patch",
- "--force",
- "-p1",
- "-d",
- config.path,
- "-i",
- patch_docker_path,
- ]
- )
- except subprocess.CalledProcessError:
- print("PyPy patch not applied", file=sys.stderr)
if options.dependency_constraints:
constraints_file = options.dependency_constraints.get_for_python_version(
diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -23,7 +23,6 @@
install_certifi_script,
prepare_command,
read_python_configs,
- resources_dir,
unwrap,
)
@@ -167,9 +166,6 @@ def install_pypy(version: str, url: str) -> Path:
downloaded_tar_bz2 = Path("/tmp") / pypy_tar_bz2
download(url, downloaded_tar_bz2)
call(["tar", "-C", "/tmp", "-xf", downloaded_tar_bz2])
- # Patch PyPy to make sure headers get installed into a venv
- patch_path = resources_dir / "pypy_venv.patch"
- call(["patch", "--force", "-p1", "-d", installation_path, "-i", patch_path])
installation_bin_path = installation_path / "bin"
python_executable = "pypy3"
| diff --git a/test/test_testing.py b/test/test_testing.py
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -66,10 +66,6 @@ def test_uname(self):
bits = struct.calcsize("P") * 8
if bits == 32:
self.assertEqual(platform.machine(), "i686")
-
- def test_time_to_remove_the_pypy_venv_patch(self):
- if sys.platform == "darwin":
- assert not hasattr(sys, "pypy_version_info") or sys.pypy_version_info < (7,3,4)
'''
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -121,7 +121,7 @@ def expected_wheels(
python_abi_tags = ["cp36-cp36m", "cp37-cp37m", "cp38-cp38", "cp39-cp39"]
if machine_arch in ["x86_64", "AMD64", "x86"]:
- python_abi_tags += ["pp36-pypy36_pp73", "pp37-pypy37_pp73"]
+ python_abi_tags += ["pp37-pypy37_pp73"]
if platform == "macos" and get_macos_version() >= (10, 16):
# 10.16 is sometimes reported as the macOS version on macOS 11.
| Update to PyPy 7.3.4
Still in draft, but preparing for when a release is made. The Python 3.6-compatible version of PyPy was dropped, as Python 3.7 compatibility was deemed complete (cf. https://doc.pypy.org/en/latest/release-v7.3.4.html).
I still need to get an updated manylinux docker image.
| > The Python 3.6-compatible version of PyPy was dropped, as the Python 3.7-compatibility was deemed completed (cfr. https://doc.pypy.org/en/latest/release-v7.3.4.html).
Something to think about, btw: since this is just a patch version increase of PyPy, do we actually want to keep building `pp36-pypy36_pp73` wheels with 7.3.3? I'll ask the same question when submitting a PR for PyPy's manylinux images.
EDIT: Same goes for 32-bit Windows.
The targets have changed in a patch version before (3.7 got added). I think it's fine to change on PyPy - 3.6 (due to NumPy dropping it) is rapidly getting harder to use.
Wait, are there no 32 bit binaries for Windows? They should have both, not drop 32 for 64? I know 32-bit CPython was rather popular (maybe heavily due to the fact it was the default download choice until very recently more than due to OS requirements).
> The targets have changed in a patch version before (3.7 got added). I think it's fine to change on PyPy - 3.6 (due to NumPy dropping it) is rapidly getting harder to use.
Right, but adding something is more backwards compatible, ofc. But yeah, fair enough; I guess it's PyPy's decision to release it as 7.3.4 (and ofc not worth breaking the ABI version for).
> Wait, are there no 32 bit binaries for Windows? They should have both, not drop 32 for 64? I know 32-bit CPython was rather popular (maybe heavily due to the fact it was the default download choice until very recently more than due to OS requirements).
I had already popped onto IRC to ask, yes. Nothing that seems too secret, so I'll post it here:
> ```
> <YannickJadoul> Hi all. I'm trying out PyPy 7.3.4rc1 in cibuildwheel, for the interested: https://github.com/joerick/cibuildwheel/pull/629
> <YannickJadoul> Quick question: there's no win32 anymore? PyPy won't provide both 32-bit as well as 64, just like CPython still seems to do?
> <mattip> hmm. Just because we can, should we? It is a small maintenance burden
> <YannickJadoul> Ah, no, not saying that. Just curious, if the switch will be total
> <YannickJadoul> Important to update our docs and think about releases and version numbers, etc
> <mattip> unless there is a user who says otherwise: yes, the switch will be total
> <YannickJadoul> OK, thanks!
> ```
I'll just ping @mattip, in case you'd like to convince him about the popularity of 32-bit :-)
I am willing to be convinced to release 32-bit windows, but it would have to be by a user who can explain how it is worth the (admittedly small) maintenance burden).
@Czaki, I don't remember the details, but I'm seeing "PyPy patch not applied". Do you know by heart whether we can remove that patch and workaround, or should I check the PR that introduced it again?
Where did you see this? In log?
Yep. The "Get some sample wheels" step of GHA:
```
32 wheels produced in 2 minutes:
spam-0.1.0-cp27-cp27m-manylinux1_i686.whl
PyPy patch not applied
spam-0.1.0-cp27-cp27m-manylinux1_x86_64.whl
spam-0.1.0-cp27-cp27m-manylinux2010_i686.whl
spam-0.1.0-cp27-cp27m-manylinux2010_x86_64.whl
spam-0.1.0-cp27-cp27mu-manylinux1_i686.whl
spam-0.1.0-cp27-cp27mu-manylinux1_x86_64.whl
...
```
Patches are here: https://github.com/joerick/cibuildwheel/blob/master/cibuildwheel/resources/pypy_venv_27.patch and https://github.com/joerick/cibuildwheel/blob/master/cibuildwheel/resources/pypy_venv.patch
If I good remember there was a problem that in virtualenv compiler cannot find "Python.h"
maybe current version fix it already.
> maybe current version fix it already.
Yeah, didn't you submit a PR/issue to PyPy? Any idea whether that got merged/resolved?
OK, I stopped being lazy for a second and looked it up myself: https://github.com/joerick/cibuildwheel/issues/501#issuecomment-751369614
OK, our `test_time_to_remove_the_pypy_venv_patch` was even telling us. But I hadn't noticed yet, because of the Windows issues.
So, something's wrong with the headers of [pypy2.7-v7.3.4rc1-win64.zip](https://downloads.python.org/pypy/pypy2.7-v7.3.4rc1-win64.zip); reported on IRC already. Apart from that, things seem to be working :-)
The headers are produced as part of an optional module in PyPy called cpyext that provides the C-API compatibility layer. We do not compile pypy2.7 on win64 with cpyext. Would it be hard to do `if pypy and windows and python2.7: <don't check compiling a library>`? Let's see if there are any people for whom this is critical, then we can discuss whether to ship a 32-bit windows version (which does have cpyext) or to get cpyext going on win64-python2.7
I started work on this in the win64-cpyext-default branch, but then someone pointed out that tp_hash and hashes in general on python2.7 are `long`, which are 4-bytes even on win64. This messes with some of the RPython assumptions about `sizeof(long)` so it is a bit of a mess: we need to be careful not to mix `long` and pointers
> The headers are produced as part of an optional module in PyPy called cpyext that provides the C-API compatibility layer. We do not compile pypy2.7 on win64 with cpyext. Would it be hard to do `if pypy and windows and python2.7: <don't check compiling a library>` ? Let's see if there are any people who this is critical for them, then we can discuss whether to ship a 32-bit windows version (which does have cpyext) or to get cpyext going on win64-python2.7
But the main task of `cibuildwheel` is compiling libraries. And most extensions need the `Python.h` header.
> Would it be hard to do `if pypy and windows and python2.7: <don't check compiling a library>` ?
I guess we could plainly not offer PyPy Python 2.7 on Windows, then? I think `cibuildwheel` is only useful in case of binary extensions, right?
Oh, no, wait. There's still `cffi`, of course. But `cibuildwheel` doesn't really have a way to test what kind of extension is getting compiled :-/
> This messes with some of the RPython assumptions about `sizeof(long)` so it is a bit of a mess: we need to be careful not to mix `long` and pointers
(sorry to hear about that btw; sounds like a horrible issue :-/ )
Biased for pybind11, but not having Python.h kills pybind11 extensions.
Not the best solution quite yet, but I guess needs to be mentioned: dropping Python 2 support would fix this issue...
> long, which are 4-bytes even on win64
ROOT has been fighting with this for a while, and still doesn't have a 64-bit Windows build due to it. It is clearly written in the standard that `long` does not have to be 64 bits, but that hasn't stopped assumptions based on Linux/macOS or 32-bit Windows.
> Biased for pybind11, but not having Python.h kills pybind11 extensions.
Right. How many projects have a user base that needs testing/releasing with PyPy on windows for python2.7? Before I spend a few days solving this, I would like to know it is going to be used.
Yeah, dropping 2.7 would of course also work. Or dropping PyPy 2.7 on Windows, for now? (or probably just everywhere to be consistent)
Apart from not being able to test it, I'm mostly concerned by offering PyPy 2.7 on Windows but having users' builds fail because they expect the C API to be there. I wouldn't be surprised if that resulted in a bunch of issues blaming `cibuildwheel` for offering `pp27-win_amd64`.
Honestly, I don't know how much of a user base PyPy2 has. With NumPy requiring 3.7 now, it's likely a bit limited. And 2.7 on Windows is traditionally small due to other compiler reasons (and the fact it's easier to upgrade on Windows than having a built-in Python 2 on Linux/macOS). I do think we are going to be dropping PyPy2 when dropping CPython 2.7 in 2.0. But that's not quite here yet.
So, we're still failing on PyPy 2.7's missing headers on Windows. @joerick, where would you like to go with this? Still build 2.7 32-bit for now, using 7.3.3? Or just drop PyPy 2.7 on Windows, or ... ?
I would vote for dropping it until you hear from users.
Reasoning: If you drop PyPy2.7 for windows, you would quickly hear from any users who need it, and then PyPy could invest the effort to support cpyext for that version. Any other work-around will encourage users to stay with an older version.
@mattip, sure. This seems to be working. The downside is that users also won't be able to build cffi wheels for PyPy 2.7 (on Windows), ofc. After all, this is just a decision to be made by PyPy, I'd say. (Unless you want us to support PyPy 2.7 (for cffi wheels), without C API headers; then it'd be up to @joerick to decide whether that's risking a lot of complaints about failing builds.)
@YannickJadoul let's give it a shot. If it turns out lots of people want to package for pypy2.7 on windows, we can think harder about it.
I'm cool to drop 2.7 on Windows, too. We're dropping 2.7 as a whole in #596 anyway, so it probably won't be a big issue :)
Waiting for pypy/manylinux#16 to pull this out of "draft"
For the record:
> It turns out there is a problem with the win64 release: the c-extension platform name is win32. It should be win_amd64.
>
> This was reported by cgohlke in https://foss.heptapod.net/pypy/pypy/-/issues/3443.
>
>
> I will be releasing a 7.3.5 bugfix soon. In the meantime, please do not upload win64 wheels to PyPI built with the 7.3.4 release: they will be tagged as appropriate for win32 which will cause problems for people who have locally built a 32-bit windows pypy3.7 and use "pip install".
>
>
> Note we did not officially release a 32-bit windows, so the damage so far might be minimal.
>
>
> Matti
See https://mail.python.org/pipermail/pypy-dev/2021-April/016150.html
So we probably ought to be skipping 7.3.4 and immediately pin 7.3.5?
That makes sense, although it may be a week or two until 7.3.5 is released.
Thanks, @mattip! I don't think we're in a huge hurry to merge this, currently?
Not if it's broken. :) | 2021-05-09T01:42:58 |
pypa/cibuildwheel | 701 | pypa__cibuildwheel-701 | [
"690"
] | 75b707ffb74f695dc3005859b7c31502d47e05e9 | diff --git a/bin/make_dependency_update_pr.py b/bin/make_dependency_update_pr.py
--- a/bin/make_dependency_update_pr.py
+++ b/bin/make_dependency_update_pr.py
@@ -40,7 +40,7 @@ def main():
timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.gmtime())
branch_name = f"update-constraints-{timestamp}"
- shell(f"git checkout -b {branch_name} origin/master", check=True)
+ shell(f"git checkout -b {branch_name} origin/main", check=True)
try:
shell("bin/update_dependencies.py", check=True)
@@ -63,7 +63,7 @@ def main():
"pr",
"create",
"--repo=pypa/cibuildwheel",
- "--base=master",
+ "--base=main",
"--title=Update dependencies",
f"--body='{body}'",
],
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -3,7 +3,7 @@ name: Test
on:
push:
branches:
- - master
+ - main
pull_request:
paths-ignore:
- 'docs/**'
diff --git a/test/test_ssl.py b/test/test_ssl.py
--- a/test/test_ssl.py
+++ b/test/test_ssl.py
@@ -11,8 +11,8 @@
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
data = urlopen("https://www.nist.gov", context=context)
- data = urlopen("https://raw.githubusercontent.com/pypa/cibuildwheel/master/CI.md", context=context)
- data = urlopen("https://raw.githubusercontent.com/pypa/cibuildwheel/master/CI.md")
+ data = urlopen("https://raw.githubusercontent.com/pypa/cibuildwheel/main/CI.md", context=context)
+ data = urlopen("https://raw.githubusercontent.com/pypa/cibuildwheel/main/CI.md")
"""
)
)
| Changing the default branch to `main`
This is just a heads up, I'm planning to change the default branch on this repo to `main` this week, let's say Wednesday 26th. Github have a tool to change it over, and update PRs to target the new branch, but you might have to update it on local checkouts and forks. Shouldn't be a big issue though, this is what [Github say](https://github.com/github/renaming#renaming-existing-branches) about it:
> Renaming a branch will:
>
> - Re-target any open pull requests
> - Update any draft releases based on the branch
> - Move any branch protection rules that explicitly reference the old name
> - Update the branch used to build GitHub Pages, if applicable
> - Show a notice to repository contributors, maintainers, and admins on the repository homepage with instructions to update local copies of the repository
> - Show a notice to contributors who git push to the old branch
> - Redirect web requests for the old branch name to the new branch name
> - Return a "Moved Permanently" response in API requests for the old branch name
---
Checklist for the switch:
- [x] Use the Github tool to change it over
- [x] Find/replace `master` to `main` in CI configs, docs, scripts, example code, etc
- [x] Change default branch on Readthedocs
| Experience from renaming pip's default branch is encoded here: https://github.com/pypa/pip/issues/8948
Everything on GitHub "just works", but you'll need to manually update:
- Any custom scripts.
- non-GitHub CI configuration.
- (maybe) CI commands and automation.
- ReadTheDocs configuration.
Thanks @pradyunsg ! That's very helpful :)
One extra personal detail - remember to delete your `master` branches locally afterwards. I find it's important that `git switch master` fail locally, otherwise I've started making PRs based on the old deleted master more than once. :/ | 2021-05-27T08:23:54 |
pypa/cibuildwheel | 738 | pypa__cibuildwheel-738 | [
"740"
] | 30954282fd406f20ca57084e6e942b394aa1a547 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -416,9 +416,8 @@ def build(options: BuildOptions) -> None:
config_setting = " ".join(verbosity_flags)
build_env = env.copy()
if options.dependency_constraints:
- build_env["PIP_CONSTRAINT"] = str(
- options.dependency_constraints.get_for_python_version(config.version)
- )
+ constr = options.dependency_constraints.get_for_python_version(config.version)
+ build_env["PIP_CONSTRAINT"] = constr.as_uri()
build_env["VIRTUALENV_PIP"] = get_pip_version(env)
call(
[
diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -23,9 +23,11 @@
read_python_configs,
)
+CIBW_INSTALL_PATH = Path("C:\\cibw")
+
def call(
- args: Sequence[PathOrStr], env: Optional[Dict[str, str]] = None, cwd: Optional[str] = None
+ args: Sequence[PathOrStr], env: Optional[Dict[str, str]] = None, cwd: Optional[PathOrStr] = None
) -> None:
print("+ " + " ".join(str(a) for a in args))
# we use shell=True here, even though we don't need a shell due to a bug
@@ -33,7 +35,9 @@ def call(
subprocess.run([str(a) for a in args], env=env, cwd=cwd, shell=True, check=True)
-def shell(command: str, env: Optional[Dict[str, str]] = None, cwd: Optional[str] = None) -> None:
+def shell(
+ command: str, env: Optional[Dict[str, str]] = None, cwd: Optional[PathOrStr] = None
+) -> None:
print(f"+ {command}")
subprocess.run(command, env=env, cwd=cwd, shell=True, check=True)
@@ -49,7 +53,7 @@ def get_nuget_args(version: str, arch: str) -> List[str]:
"-FallbackSource",
"https://api.nuget.org/v3/index.json",
"-OutputDirectory",
- "C:\\cibw\\python",
+ str(CIBW_INSTALL_PATH / "python"),
]
@@ -102,9 +106,9 @@ def install_pypy(version: str, arch: str, url: str) -> Path:
zip_filename = url.rsplit("/", 1)[-1]
extension = ".zip"
assert zip_filename.endswith(extension)
- installation_path = Path("C:\\cibw") / zip_filename[: -len(extension)]
+ installation_path = CIBW_INSTALL_PATH / zip_filename[: -len(extension)]
if not installation_path.exists():
- pypy_zip = Path("C:\\cibw") / zip_filename
+ pypy_zip = CIBW_INSTALL_PATH / zip_filename
download(url, pypy_zip)
# Extract to the parent directory because the zip file still contains a directory
extract_zip(pypy_zip, installation_path.parent)
@@ -119,7 +123,7 @@ def setup_python(
build_frontend: BuildFrontend,
) -> Dict[str, str]:
- nuget = Path("C:\\cibw\\nuget.exe")
+ nuget = CIBW_INSTALL_PATH / "nuget.exe"
if not nuget.exists():
log.step("Downloading nuget...")
download("https://dist.nuget.org/win-x86-commandline/latest/nuget.exe", nuget)
@@ -184,7 +188,7 @@ def setup_python(
requires_reinstall = not (installation_path / "Scripts" / "pip.exe").exists()
if requires_reinstall:
# maybe pip isn't installed at all. ensurepip resolves that.
- call(["python", "-m", "ensurepip"], env=env, cwd="C:\\cibw")
+ call(["python", "-m", "ensurepip"], env=env, cwd=CIBW_INSTALL_PATH)
# upgrade pip to the version matching our constraints
# if necessary, reinstall it to ensure that it's available on PATH as 'pip.exe'
@@ -199,7 +203,7 @@ def setup_python(
*dependency_constraint_flags,
],
env=env,
- cwd="C:\\cibw",
+ cwd=CIBW_INSTALL_PATH,
)
assert (installation_path / "Scripts" / "pip.exe").exists()
@@ -313,22 +317,35 @@ def build(options: BuildOptions) -> None:
config_setting = " ".join(verbosity_flags)
build_env = env.copy()
if options.dependency_constraints:
- build_env["PIP_CONSTRAINT"] = str(
- options.dependency_constraints.get_for_python_version(config.version)
+ constraints_path = options.dependency_constraints.get_for_python_version(
+ config.version
+ )
+ # Bug in pip <= 21.1.3 - we can't have a space in the
+ # constraints file, and pip doesn't support drive letters
+ # in uhi. After probably pip 21.2, we can use uri. For
+ # now, use a temporary file.
+ if " " in str(constraints_path):
+ tmp_file = tempfile.NamedTemporaryFile(
+ "w", suffix="constraints.txt", delete=False, dir=CIBW_INSTALL_PATH
+ )
+ with tmp_file as new_constraints_file, open(constraints_path) as f:
+ new_constraints_file.write(f.read())
+ constraints_path = Path(new_constraints_file.name)
+
+ build_env["PIP_CONSTRAINT"] = str(constraints_path)
+ build_env["VIRTUALENV_PIP"] = get_pip_version(env)
+ call(
+ [
+ "python",
+ "-m",
+ "build",
+ options.package_dir,
+ "--wheel",
+ f"--outdir={built_wheel_dir}",
+ f"--config-setting={config_setting}",
+ ],
+ env=build_env,
)
- build_env["VIRTUALENV_PIP"] = get_pip_version(env)
- call(
- [
- "python",
- "-m",
- "build",
- options.package_dir,
- "--wheel",
- f"--outdir={built_wheel_dir}",
- f"--config-setting={config_setting}",
- ],
- env=build_env,
- )
else:
assert_never(options.build_frontend)
| diff --git a/test/test_dependency_versions.py b/test/test_dependency_versions.py
--- a/test/test_dependency_versions.py
+++ b/test/test_dependency_versions.py
@@ -122,7 +122,7 @@ def test_dependency_constraints_file(tmp_path, build_frontend_env):
"virtualenv": "20.0.35",
}
- constraints_file = tmp_path / "constraints.txt"
+ constraints_file = tmp_path / "constraints file.txt"
constraints_file.write_text(
textwrap.dedent(
"""
| bug: space in directory name can cause issues
When testing out build support in cibuildwheel 2.0.0a4, I ran into this failure:
```pytb
python -m build . --wheel --outdir=C:\Users\RUNNER~1\AppData\Local\Temp\cibuildwheelt0jax362\built_wheel --config-setting=-v
ERROR: Could not open requirements file: [Errno 2] No such file or directory: 'C:\\Program'
Traceback (most recent call last):
File "C:\cibw\python\python.3.8.10\tools\lib\site-packages\build\__main__.py", line 302, in main
build_call(args.srcdir, outdir, distributions, config_settings, not args.no_isolation, args.skip_dependency_check)
File "C:\cibw\python\python.3.8.10\tools\lib\site-packages\build\__main__.py", line 145, in build_package
_build(isolation, builder, outdir, distribution, config_settings, skip_dependency_check)
File "C:\cibw\python\python.3.8.10\tools\lib\site-packages\build\__main__.py", line 101, in _build
return _build_in_isolated_env(builder, outdir, distribution, config_settings)
File "C:\cibw\python\python.3.8.10\tools\lib\site-packages\build\__main__.py", line 81, in _build_in_isolated_env
env.install(builder.build_system_requires)
File "C:\cibw\python\python.3.8.10\tools\lib\site-packages\build\env.py", line 169, in install
subprocess.check_call(cmd)
File "C:\cibw\python\python.3.8.10\tools\lib\subprocess.py", line 364, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\build-env-s3f7jwu0\\Scripts\\python.exe', '-Im', 'pip', 'install', '--use-pep517', '--no-warn-script-location', '-r', 'C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\build-reqs-ckde1r2k.txt']' returned non-zero exit status 1.
ERROR Command '['C:\\Users\\runneradmin\\AppData\\Local\\Temp\\build-env-s3f7jwu0\\Scripts\\python.exe', '-Im', 'pip', 'install', '--use-pep517', '--no-warn-script-location', '-r', 'C:\\Users\\RUNNER~1\\AppData\\Local\\Temp\\build-reqs-ckde1r2k.txt']' returned non-zero exit status 1.
```
I _think_ this is a problem with quoting paths on Windows probably in `build`, or maybe `pip`? The same build works using `pip wheel` from cibuildwheel. Seen in scikit-hep/boost-histogram#583.
| Where is the "Program" (presumably "Program Files") path coming from?
I haven't figured that out yet. The error is coming from pip: https://github.com/pypa/pip/blob/7a77484a492c8f1e1f5ef24eaf71a43df9ea47eb/src/pip/_internal/req/req_file.py#L549
`cibuildwheel` installs Python into the "normal" location for macOS and Windows, so that's likely in Program Files. But I have no idea why there's a requirement file there. (I'll try to start up a Windows machine later to debug - could also try adding a space into the path that the CI runs in)
Wasn't able to reproduce it in the test suite, but I have an idea - if true, it might be pip's fault.
I believe the issue is that `PIP_CONSTRAINT="path with space.txt"` does not work - it's trying to define separate constraint files instead. This is in the workaround from pypa/build#292.
Ideally, build would provide some interface to define constraints that would, for now, be converted to `--constraints` to pip but would be adapted to a new tool if pip was replaced - this workaround is only valid as long as pip is the package installer used.
Another workaround is to use `file://` URLs (for which you can percent-escape the path). | 2021-06-29T00:00:21 |
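For reference, Python's `pathlib` can produce such a percent-escaped `file://` URI; a minimal sketch (the path below is only an example):

```python
from pathlib import PureWindowsPath

# A hypothetical constraints path containing a space; as_uri() percent-escapes
# it, so a value passed via PIP_CONSTRAINT is no longer split on whitespace.
constraints = PureWindowsPath(r"C:\Program Files\constraints.txt")
print(constraints.as_uri())  # file:///C:/Program%20Files/constraints.txt
```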
pypa/cibuildwheel | 807 | pypa__cibuildwheel-807 | [
"793"
] | 41a47ff04ec9e99b8557d0aab462d4b0a9f2148f | diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -306,18 +306,27 @@ def build(options: BuildOptions) -> None:
log.step_end_with_error(
f"Command {error.cmd} failed with code {error.returncode}. {error.stdout}"
)
- troubleshoot(options.package_dir, error)
+ troubleshoot(options, error)
sys.exit(1)
-def troubleshoot(package_dir: Path, error: Exception) -> None:
+def _matches_prepared_command(error_cmd: List[str], command_template: str) -> bool:
+ if len(error_cmd) < 3 or error_cmd[0:2] != ["sh", "-c"]:
+ return False
+ command_prefix = command_template.split("{", maxsplit=1)[0].strip()
+ return error_cmd[2].startswith(command_prefix)
+
+
+def troubleshoot(options: BuildOptions, error: Exception) -> None:
+
if isinstance(error, subprocess.CalledProcessError) and (
error.cmd[0:4] == ["python", "-m", "pip", "wheel"]
or error.cmd[0:3] == ["python", "-m", "build"]
+ or _matches_prepared_command(error.cmd, options.repair_command)
):
# the wheel build step failed
print("Checking for common errors...")
- so_files = list(package_dir.glob("**/*.so"))
+ so_files = list(options.package_dir.glob("**/*.so"))
if so_files:
print(
@@ -326,10 +335,17 @@ def troubleshoot(package_dir: Path, error: Exception) -> None:
NOTE: Shared object (.so) files found in this project.
These files might be built against the wrong OS, causing problems with
- auditwheel.
+ auditwheel. If possible, run cibuildwheel in a clean checkout.
If you're using Cython and have previously done an in-place build,
remove those build files (*.so and *.c) before starting cibuildwheel.
+
+ setuptools uses the build/ folder to store its build cache. It
+ may be necessary to remove those build files (*.so and *.o) before
+ starting cibuildwheel.
+
+ Files that belong to a virtual environment are probably not an issue
+ unless you used a custom command telling cibuildwheel to activate it.
"""
),
file=sys.stderr,
| diff --git a/test/test_troubleshooting.py b/test/test_troubleshooting.py
--- a/test/test_troubleshooting.py
+++ b/test/test_troubleshooting.py
@@ -3,25 +3,23 @@
import pytest
from . import utils
-from .test_projects import TestProject
+from .test_projects import TestProject, new_c_project
-so_file_project = TestProject()
+SO_FILE_WARNING = "NOTE: Shared object (.so) files found in this project."
-so_file_project.files["libnothing.so"] = ""
-so_file_project.files[
- "setup.py"
-] = """
-raise Exception('this build will fail')
-"""
[email protected]("project_contains_so_files", [False, True])
+def test_failed_build_with_so_files(tmp_path, capfd, build_frontend_env, project_contains_so_files):
+ project = TestProject()
+ project.files["setup.py"] = "raise Exception('this build will fail')\n"
+ if project_contains_so_files:
+ project.files["libnothing.so"] = ""
-
-def test_failed_project_with_so_files(tmp_path, capfd, build_frontend_env):
if utils.platform != "linux":
pytest.skip("this test is only relevant to the linux build")
project_dir = tmp_path / "project"
- so_file_project.generate(project_dir)
+ project.generate(project_dir)
with pytest.raises(subprocess.CalledProcessError):
utils.cibuildwheel_run(project_dir, add_env=build_frontend_env)
@@ -29,4 +27,34 @@ def test_failed_project_with_so_files(tmp_path, capfd, build_frontend_env):
captured = capfd.readouterr()
print("out", captured.out)
print("err", captured.err)
- assert "NOTE: Shared object (.so) files found in this project." in captured.err
+
+ if project_contains_so_files:
+ assert SO_FILE_WARNING in captured.err
+ else:
+ assert SO_FILE_WARNING not in captured.err
+
+
[email protected]("project_contains_so_files", [False, True])
+def test_failed_repair_with_so_files(tmp_path, capfd, project_contains_so_files):
+ if utils.platform != "linux":
+ pytest.skip("this test is only relevant to the linux build")
+
+ project = new_c_project()
+
+ if project_contains_so_files:
+ project.files["libnothing.so"] = ""
+
+ project_dir = tmp_path / "project"
+ project.generate(project_dir)
+
+ with pytest.raises(subprocess.CalledProcessError):
+ utils.cibuildwheel_run(project_dir, add_env={"CIBW_REPAIR_WHEEL_COMMAND": "false"})
+
+ captured = capfd.readouterr()
+ print("out", captured.out)
+ print("err", captured.err)
+
+ if project_contains_so_files:
+ assert SO_FILE_WARNING in captured.err
+ else:
+ assert SO_FILE_WARNING not in captured.err
| `troubleshoot()` misses .so errors that happen at the auditwheel step (was: when running cibuildwheel locally, a {project}/build directory may interfere)
To reproduce:
* using a Linux host machine that is significantly newer than the manylinux target (in my case, Ubuntu 21.04 and manylinux2010)
* use the same version of Python on host and target
* git clone any package with C extensions
* `pip install -e .`
* `cibuildwheel --plat linux`
You will probably get error messages that complain about use of newer GLIBC symbols. These will coincide with the host machine's GLIBC.
The reason is that locally installing the package (`pip install [-e] .` or `python setup.py bdist_wheel`) will create built libraries in the `build/` directory. And then, cibuildwheel maps the local directory to `{project}/build` in the container, and when the Docker container runs pip, pip thinks that the C extension is already built and up to date, so it skips building it and uses the wrong version.
I suggest creating an additional volume mapping from e.g. `host:/tmp/cibuildwheel-build` to `container:/project/build`, so that the container is isolated from the local build directory (and so that these temporary objects can be accessed if the build fails).
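A rough sketch of the kind of extra mount being suggested (the paths, image name, and flags below are illustrative, not cibuildwheel's actual invocation):

```python
# Shadow the project's build/ directory with a scratch volume so that
# artifacts from local host builds never appear at /project/build inside
# the container. All names here are hypothetical.
docker_run = [
    "docker", "run", "--rm", "-i",
    "-v", "/path/to/project:/project",               # the project checkout
    "-v", "/tmp/cibuildwheel-build:/project/build",  # the proposed extra mapping
    "quay.io/pypa/manylinux2010_x86_64",
    "/bin/bash",
]
print(" ".join(docker_run))
```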
| if you run a build locally then the `.so` files will be inside your project directory and the existence of the `build` directory is irrelevant. `setuptools` will find that these `.so` files exist and will not perform the build step.
How do you mean it's irrelevant? The fact that setuptools/pip prevents the build step is precisely the issue being raised here.
The existence of a build directory, when building with setuptools is not irrelevant. Setuptools doesn't clean the directory before performing a build, so past build artifacts will get included in newer artifacts. See the pip 20.0 release fiasco for example.
> How do you mean it's irrelevant? The fact that setuptools/pip prevents the build step is precisely the issue being raised here.
because if you have `.so` files inside code (which happens when you call `pip install -e` for example) then removing the `build` directory will change nothing.
You could also change this behavior by `CIBW_BEFORE_ALL="rm -rf {project}/build"`
for me, build is too common a name to block its usage.
> The existence of a build directory, when building with setuptools is not irrelevant. Setuptools doesn't clean the directory before performing a build, so past build artifacts will get included in newer artifacts. See the pip 20.0 release fiasco for example.
I know this. but `build` is too common a name to mock it by default.
`cibuildwheel` gives you a tool to set up a clean step (it could be done in the `pyproject.toml` file).
I know the pain of cleaning artifacts on Linux machines, but the requested change could create more problems than solve.
Yea, I didn't mean to imply that you don't know that.
What I was hinting at is that it is relevant to how the user's project would be built, which is what they're using cibuildwheel for. I do agree that the approach suggested by OP is the solution here and that this is not something that `cibuildwheel` can solve.
And given that you've already provided the context that the user might have needed, I'm gonna bow out now.
I spent many hours investigating this issue. It's a nontrivial dirty state interaction, and the error message (through no fault of cibuildwheel) points in the wrong direction.
>because if you have .so files inside code (which happens when you call pip install -e for example) then removing the build directory will change nothing.
This is not actually true. When building a wheel, only whitelisted files from the source tree are copied into the wheel. By default this is only .py files; everything else needs to be listed in the package's `package_data` definition, even innocuous files like `py.typed`. That means if your source tree contains `.so` files, they will be ignored. Compiled C extensions get added to the wheel in a separate step after adding the Python source tree. Having a `build/` directory will contaminate that separate step. I don't want to get into the weeds on this, but you can build any wheel with `pip wheel -v .` to confirm how this actually functions.
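For concreteness, a minimal `setup.py` sketch of that whitelisting (package and file names are made up): a stray `spam/libfoo.so` in the source tree would only be packaged if something like `package_data` explicitly listed it, while the compiled extension module is collected from `build/lib*` in a separate step.

```python
# illustrative setup.py -- names are hypothetical
from setuptools import Extension, setup

setup(
    name="spam",
    version="0.1.0",
    packages=["spam"],
    # The compiled extension is assembled into the wheel from build/lib*,
    # independently of any .so files lying around in the source tree.
    ext_modules=[Extension("spam._native", sources=["src/native.c"])],
    # A stray spam/libfoo.so would only be shipped if it were listed here:
    # package_data={"spam": ["*.so"]},
)
```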
I understand that one could mitigate it by explicitly deleting `build/`. But the whole point of CI is to have isolated, repeatable build environments. cibuildwheel should identify the problem of a dirty `build/` on its own.
> I understand that one could mitigating it by explicitly deleting `build/`. But the whole point of CI is to have isolated repeatable build environments. cibuildwheel should identify the problem of a dirty `build/` on its own.
If you build wheels on CI, how could you have a dirty `build/`? A dirty build you could have on a local machine. I do not see a scenario on CI when you will have a dirty build dir. CI should provide the same starting state every time, and the repository should not contain a `build` directory with artifacts from a wheel build.
> This is not actually true. When building a wheel only whitelisted files from the source tree are copied into the wheel. By default this is only .py files; everything else needs to be listed in the package's package_data definition
I'm not sure. I need to check this. Last time when I do a similar mistake so files were copied. Maybe they fix it later. (I will check it today).
Ok, so I've done some experiments so we're all on the same page...
```console
$ pwd
/Users/joerick/Desktop/test-project
$ tree
.
├── setup.cfg
├── setup.py
└── src
└── spam.c
1 directory, 3 files
```
Installing it normally-
```console
$ pip install .
Processing /Users/joerick/Desktop/test-project
DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.
pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.
Using legacy 'setup.py install' for spam, since package 'wheel' is not installed.
Installing collected packages: spam
Attempting uninstall: spam
Found existing installation: spam 0.1.0
Uninstalling spam-0.1.0:
Successfully uninstalled spam-0.1.0
Running setup.py install for spam ... done
Successfully installed spam-0.1.0
$ tree
.
├── setup.cfg
├── setup.py
└── src
└── spam.c
1 directory, 3 files
```
So far, so good. No build artifacts added. Let's try installing that with `-e`.
```console
$ git clean -df
$ tree
.
├── setup.cfg
├── setup.py
└── src
└── spam.c
1 directory, 3 files
$ pip install -e .
Obtaining file:///Users/joerick/Desktop/test-project
Installing collected packages: spam
Running setup.py develop for spam
Successfully installed spam-0.1.0
$ tree
.
├── build
│ ├── lib.macosx-10.9-x86_64-3.8
│ │ └── spam.cpython-38-darwin.so
│ └── temp.macosx-10.9-x86_64-3.8
│ └── src
│ └── spam.o
├── setup.cfg
├── setup.py
├── spam.cpython-38-darwin.so
├── spam.egg-info
│ ├── PKG-INFO
│ ├── SOURCES.txt
│ ├── dependency_links.txt
│ └── top_level.txt
└── src
└── spam.c
6 directories, 10 files
```
So, a few things to note here. The build added `build` and `spam.egg-info` dirs. It also added `spam.cpython-38-darwin.so`, outside of the `build` tree. The binary `.o` and `.so` files are the issue here, they shouldn't be moved across to the Docker container.
So we could write rules to exclude the `build` dir, but I believe the isolated `spam.cpython-38-darwin.so` is the issue that @czaki is mentioning. These files get spread throughout the build repository, in my experience, wherever the `.c` file is located. I don't think we can automatically exclude .so files when copying, because some users _will_ want to bring .so files into the compile process (e.g. a binary dependency). We discussed this issue in the past, it's at #139. In response to that, we added a message on build failure. Did you see this message @jbarlow83?
---
As an aside, I was curious what would happen when `--use-feature=in-tree-build` becomes the default. Here's what I found:
```console
$ git clean -df
Removing build/
Removing spam.cpython-38-darwin.so
Removing spam.egg-info/
$ tree
.
├── setup.cfg
├── setup.py
└── src
└── spam.c
1 directory, 3 files
$ pip install --use-feature=in-tree-build .
Processing /Users/joerick/Desktop/test-project
Using legacy 'setup.py install' for spam, since package 'wheel' is not installed.
Installing collected packages: spam
Running setup.py install for spam ... done
Successfully installed spam-0.1.0
$ tree
.
├── build
│ ├── lib.macosx-10.9-x86_64-3.8
│ │ └── spam.cpython-38-darwin.so
│ └── temp.macosx-10.9-x86_64-3.8
│ └── src
│ └── spam.o
├── setup.cfg
├── setup.py
├── spam.egg-info
│ ├── PKG-INFO
│ ├── SOURCES.txt
│ ├── dependency_links.txt
│ └── top_level.txt
└── src
└── spam.c
```
This is slightly different from the `-e .`, in that it doesn't put the `.so` files around. But it _does_ create the `build` directory.
> Did you see this message @jbarlow83?
No. This is the failure path:
```
Repairing wheel...
+ sh -c 'auditwheel repair -w /tmp/cibuildwheel/repaired_wheel /tmp/cibuildwheel/built_wheel/pikepdf-3.0.0b2.dev9+g0bd6795b-cp39-cp39-linux_x86_64.whl'
INFO:auditwheel.main_repair:Repairing pikepdf-3.0.0b2.dev9+g0bd6795b-cp39-cp39-linux_x86_64.whl
usage: auditwheel [-h] [-V] [-v] command ...
auditwheel: error: cannot repair "/tmp/cibuildwheel/built_wheel/pikepdf-3.0.0b2.dev9+g0bd6795b-cp39-cp39-linux_x86_64.whl" to "manylinux2010_x86_64" ABI because of the presence of too-recent versioned symbols. You'll need to compile the wheel on an older toolchain.
✕ 1.27s
Error: Command ['sh', '-c', 'auditwheel repair -w /tmp/cibuildwheel/repaired_wheel /tmp/cibuildwheel/built_wheel/pikepdf-3.0.0b2.dev9+g0bd6795b-cp39-cp39-linux_x86_64.whl'] failed with code 2
```
Looking around the code, the troubleshoot message does not print when `auditwheel` fails. Perhaps with earlier versions of pip wheel, behavior/failure modes were a bit different.
https://github.com/pypa/cibuildwheel/blob/1ceaeb4fb2107c1af844134aecfd3b647ba400ab/cibuildwheel/linux.py#L313-L317
Thanks for getting back @jbarlow83!
> Perhaps with earlier versions of pip wheel, behavior/failure modes were a bit different.
Yes, that sounds possible. I think we should adapt this clause to catch auditwheel failures too, and print the same message. I don't think we can do anything further, even if we did delete the `build` directory on copying we'd still have the `.so` files scattered around so it wouldn't solve the problem.
If anyone has time to send a PR updating that `troubleshoot` clause to include auditwheel failures, I'd be grateful.
setuptools uses `build/temp*` and `build/lib*` as a build cache, and cibuildwheel inadvertently shares the build cache from the host and container by copying the whole source tree. I apologize but I can't get around the fact that unintentionally sharing the build cache from host to container is counterproductive when isolating builds is the goal. I believe that Cython has its own build cache that it drops elsewhere in `build/`.
By the time we get to `troubleshoot()`, the build folder is going to contain *mostly valid* `.o` and `.so` files. Searching for these files isn't going to help; we expect a bunch of them. It would work to warn before the build that these folders contain files that may interfere with auditwheel. I can throw together a PR along those lines.
The core devs have noticed this is a problem in other areas and there's a draft PEP to make the cache directory configurable. That's what would be best when it's available - use a /tmp directory and avoid these issues, and there's no problem for users who happen to be doing strange things in their build folder.
https://github.com/python/peps/pull/1976/files
> setuptools uses build/temp* and build/lib* as a build cache, and cibuildwheel inadvertently shares the build cache from the host and container by copying the whole source tree. I apologize but I can't get around the fact that unintentionally sharing the build cache from host to container is counterproductive when isolating builds is the goal.
This is true, but the caches are not _just_ in `./build`. See my example above:
> ```console
> $ git clean -df
> $ tree
> .
> ├── setup.cfg
> ├── setup.py
> └── src
> └── spam.c
>
> 1 directory, 3 files
> $ pip install -e .
> Obtaining file:///Users/joerick/Desktop/test-project
> Installing collected packages: spam
> Running setup.py develop for spam
> Successfully installed spam-0.1.0
> $ tree
> .
> ├── build
> │ ├── lib.macosx-10.9-x86_64-3.8
> │ │ └── spam.cpython-38-darwin.so
> │ └── temp.macosx-10.9-x86_64-3.8
> │ └── src
> │ └── spam.o
> ├── setup.cfg
> ├── setup.py
> ├── spam.cpython-38-darwin.so <-------------------------------- this file
> ├── spam.egg-info
> │ ├── PKG-INFO
> │ ├── SOURCES.txt
> │ ├── dependency_links.txt
> │ └── top_level.txt
> └── src
> └── spam.c
>
> 6 directories, 10 files
> ```
There are also caches written that are stored alongside the source files, as well as in `build`. I don't know why setuptools does this, maybe somebody does? But the point is that removing the `build` dir wouldn't solve the problem, as these files would still be copied over.
> By the time we get to `troubleshoot()`, because the build folder is going to contain _mostly valid_ `.o` and `.so` files. Searching for these files isn't going to help; we expect a bunch of them.
Not true, I think. The `troubleshoot()` function scans the host's version of the source tree, so it's not affected by the build that happened in the Docker container.
`spam.cpython-38-darwin.so` is not a cache file, it's an output that gets dumped in the source tree. While it will be copied to the container, it only gets included in the wheel if the package manifest includes that specific file (e.g. by adding it to package data in setup.cfg). `pip wheel` only copies files on the manifest to the wheel.
You can easily confirm that the compiled `.so` files that get copied to the wheel always come from `build/` and copies outside of build get ignored. If you check `pip wheel -v .` you'll see something like:
```
copying build/lib.linux-x86_64-3.9/spam.cpython-39-x86_64-linux-gnu.so -> build/bdist.linux-x86_64/wheel/spam
```
Hack `./spam*.so` to be different from `build/lib*/spam*.so` and use `wheel unpack` to confirm that the latter is what gets copied to the wheel.
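(As an aside, since a wheel is just a zip archive, its contents can also be listed without `wheel unpack`; the filename below is only an example.)

```python
import zipfile

# List everything that actually ended up inside the built wheel.
with zipfile.ZipFile("spam-0.1.0-cp39-cp39-linux_x86_64.whl") as wheel:
    for name in wheel.namelist():
        print(name)
```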
>Not true, I think. The troubleshoot() function scans the host's version of the source tree, so it's not affected by the build that happened in the Docker container.
I stand corrected. Container changes won't show up in the host, so troubleshoot() may be viable after all.
> You can easily confirm that the compiled `.so` files that get copied to the wheel always come from `build/` and copies outside of build get ignored. If you check `pip wheel -v .` you'll see something like:
did you try this with a cleaned `build`? I cannot check now, but in the past, if the `build` was deleted and proper `*.so` files were present in the project structure then they were used instead of creating new ones.
I still do not understand what the point is. `cibuildwheel` is a tool for building wheels on CI services, not on a local machine. I do not see a scenario where on CI you will have a polluted file structure (other than keeping binaries in a repository).
As far as I understand, you are trying to push a solution for local `cibuildwheel` usage which may produce multiple problems for other people using it on CI services. | 2021-08-26T22:34:30 |
pypa/cibuildwheel | 829 | pypa__cibuildwheel-829 | [
"827"
] | fded547220d97f03121dbb50a9104d7341bea802 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -291,9 +291,9 @@ def main() -> None:
for build_platform in MANYLINUX_ARCHS:
pinned_images = all_pinned_docker_images[build_platform]
- config_value = options(f"manylinux-{build_platform}-image")
+ config_value = options(f"manylinux-{build_platform}-image", ignore_empty=True)
- if config_value is None:
+ if not config_value:
# default to manylinux2010 if it's available, otherwise manylinux2014
image = pinned_images.get("manylinux2010") or pinned_images.get("manylinux2014")
elif config_value in pinned_images:
diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -19,15 +19,26 @@ class ConfigOptionError(KeyError):
pass
-def _dig_first(*pairs: Tuple[Mapping[str, Any], str]) -> Setting:
+def _dig_first(*pairs: Tuple[Mapping[str, Setting], str], ignore_empty: bool = False) -> Setting:
"""
Return the first dict item that matches from pairs of dicts and keys.
- Final result is will throw a KeyError if missing.
+ Will throw a KeyError if missing.
_dig_first((dict1, "key1"), (dict2, "key2"), ...)
"""
- (dict_like, key), *others = pairs
- return dict_like.get(key, _dig_first(*others)) if others else dict_like[key]
+ if not pairs:
+ raise ValueError("pairs cannot be empty")
+
+ for dict_like, key in pairs:
+ if key in dict_like:
+ value = dict_like[key]
+
+ if ignore_empty and value == "":
+ continue
+
+ return value
+
+ raise KeyError(key)
class ConfigOptions:
@@ -62,7 +73,7 @@ def __init__(
defaults_path = resources_dir / "defaults.toml"
self.default_options, self.default_platform_options = self._load_file(defaults_path)
- # load the project config file
+ # Load the project config file
config_options: Dict[str, Any] = {}
config_platform_options: Dict[str, Any] = {}
@@ -75,7 +86,7 @@ def __init__(
if pyproject_toml_path.exists():
config_options, config_platform_options = self._load_file(pyproject_toml_path)
- # validate project config
+ # Validate project config
for option_name in config_options:
if not self._is_valid_global_option(option_name):
raise ConfigOptionError(f'Option "{option_name}" not supported in a config file')
@@ -129,6 +140,7 @@ def __call__(
env_plat: bool = True,
sep: Optional[str] = None,
table: Optional[TableFmt] = None,
+ ignore_empty: bool = False,
) -> str:
"""
Get and return the value for the named option from environment,
@@ -136,7 +148,8 @@ def __call__(
accept platform versions of the environment variable. If this is an
array it will be merged with "sep" before returning. If it is a table,
it will be formatted with "table['item']" using {k} and {v} and merged
- with "table['sep']".
+ with "table['sep']". Empty variables will not override if ignore_empty
+ is True.
"""
if name not in self.default_options and name not in self.default_platform_options:
@@ -155,6 +168,7 @@ def __call__(
(self.config_options, name),
(self.default_platform_options, name),
(self.default_options, name),
+ ignore_empty=ignore_empty,
)
if isinstance(result, dict):
| diff --git a/unit_test/options_toml_test.py b/unit_test/options_toml_test.py
--- a/unit_test/options_toml_test.py
+++ b/unit_test/options_toml_test.py
@@ -1,6 +1,6 @@
import pytest
-from cibuildwheel.options import ConfigOptionError, ConfigOptions
+from cibuildwheel.options import ConfigOptionError, ConfigOptions, _dig_first
PYPROJECT_1 = """
[tool.cibuildwheel]
@@ -181,10 +181,68 @@ def test_disallowed_a(tmp_path):
tmp_path.joinpath("pyproject.toml").write_text(
"""
[tool.cibuildwheel.windows]
-manylinux-x64_86-image = "manylinux1"
+manylinux-x86_64-image = "manylinux1"
"""
)
- disallow = {"windows": {"manylinux-x64_86-image"}}
+ disallow = {"windows": {"manylinux-x86_64-image"}}
ConfigOptions(tmp_path, platform="linux", disallow=disallow)
with pytest.raises(ConfigOptionError):
ConfigOptions(tmp_path, platform="windows", disallow=disallow)
+
+
+def test_environment_override_empty(tmp_path, monkeypatch):
+ tmp_path.joinpath("pyproject.toml").write_text(
+ """
+[tool.cibuildwheel]
+manylinux-i686-image = "manylinux1"
+manylinux-x86_64-image = ""
+"""
+ )
+
+ monkeypatch.setenv("CIBW_MANYLINUX_I686_IMAGE", "")
+ monkeypatch.setenv("CIBW_MANYLINUX_AARCH64_IMAGE", "manylinux1")
+
+ options = ConfigOptions(tmp_path, platform="linux")
+
+ assert options("manylinux-x86_64-image") == ""
+ assert options("manylinux-i686-image") == ""
+ assert options("manylinux-aarch64-image") == "manylinux1"
+
+ assert options("manylinux-x86_64-image", ignore_empty=True) == "manylinux2010"
+ assert options("manylinux-i686-image", ignore_empty=True) == "manylinux1"
+ assert options("manylinux-aarch64-image", ignore_empty=True) == "manylinux1"
+
+
[email protected]("ignore_empty", (True, False))
+def test_dig_first(ignore_empty):
+ d1 = {"random": "thing"}
+ d2 = {"this": "that", "empty": ""}
+ d3 = {"other": "hi"}
+ d4 = {"this": "d4", "empty": "not"}
+
+ answer = _dig_first(
+ (d1, "empty"),
+ (d2, "empty"),
+ (d3, "empty"),
+ (d4, "empty"),
+ ignore_empty=ignore_empty,
+ )
+ assert answer == ("not" if ignore_empty else "")
+
+ answer = _dig_first(
+ (d1, "this"),
+ (d2, "this"),
+ (d3, "this"),
+ (d4, "this"),
+ ignore_empty=ignore_empty,
+ )
+ assert answer == "that"
+
+ with pytest.raises(KeyError):
+ _dig_first(
+ (d1, "this"),
+ (d2, "other"),
+ (d3, "this"),
+ (d4, "other"),
+ ignore_empty=ignore_empty,
+ )
| Empty MANYLINUX env variable produces invalid result
If the MANYLINUX environment variables are empty, then cibuildwheel fails, rather than falling back to either the value in the config or the default value. This means code like this is broken:
```yaml
build_wheels:
strategy:
fail-fast: false
matrix:
os: [windows-latest, macos-latest, ubuntu-latest]
arch: [auto64]
build: ["*"]
include:
- os: ubuntu-latest
arch: auto32
build: "*"
- os: ubuntu-latest
type: ManyLinux1
arch: auto
build: "cp{36,37,38}-*"
CIBW_MANYLINUX_X86_64_IMAGE: skhep/manylinuxgcc-x86_64
CIBW_MANYLINUX_I686_IMAGE: skhep/manylinuxgcc-i686
steps:
- uses: actions/checkout@v2
- uses: pypa/[email protected]
env:
CIBW_BUILD: ${{ matrix.build }}
CIBW_MANYLINUX_I686_IMAGE: ${{ matrix.CIBW_MANYLINUX_I686_IMAGE }}
CIBW_MANYLINUX_X86_64_IMAGE: ${{ matrix.CIBW_MANYLINUX_X86_64_IMAGE }}
CIBW_ARCHS: ${{ matrix.arch }}
```
This sets `CIBW_MANYLINUX_I686_IMAGE` to an empty string on most jobs, which is then an invalid docker container. This should either be the default, or better yet, the pyproject.toml file value or the default.
I don't think this generalizes to all parameters, though; setting things like the repair command to an empty string is valid.
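To illustrate the desired fallback (a sketch of the idea only; the actual fix lives in the `_dig_first` change above): an empty string coming from the environment should be skipped in favour of the next source.

```python
def first_non_empty(*candidates, default=None):
    """Return the first candidate that is neither None nor an empty string."""
    for value in candidates:
        if value:  # skips None and ""
            return value
    return default

# an env value of "" falls through to the pyproject.toml value, then the built-in default
image = first_non_empty("", "manylinux1", default="manylinux2010")
assert image == "manylinux1"
```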
| I also think this used to work, so it might be a regression. Pretty sure we discussed it in the past somewhere, and I think fixed it back then. | 2021-09-17T02:35:22 |
pypa/cibuildwheel | 881 | pypa__cibuildwheel-881 | [
"742"
] | f01d6ed8fed72dd3364436db2230f7634264a85e | diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -112,7 +112,6 @@ def install_pypy(version: str, arch: str, url: str) -> Path:
download(url, pypy_zip)
# Extract to the parent directory because the zip file still contains a directory
extract_zip(pypy_zip, installation_path.parent)
- (installation_path / "python.exe").symlink_to(installation_path / "pypy3.exe")
return installation_path
| diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -130,7 +130,7 @@ def expected_wheels(
python_abi_tags = ["cp36-cp36m", "cp37-cp37m", "cp38-cp38", "cp39-cp39", "cp310-cp310"]
if machine_arch in ["x86_64", "AMD64", "x86", "aarch64"]:
- python_abi_tags += ["pp37-pypy37_pp73"]
+ python_abi_tags += ["pp37-pypy37_pp73", "pp38-pypy38_pp73"]
if platform == "macos" and machine_arch == "arm64":
# currently, arm64 macs are only supported by cp39 & cp310
diff --git a/unit_test/build_selector_test.py b/unit_test/build_selector_test.py
--- a/unit_test/build_selector_test.py
+++ b/unit_test/build_selector_test.py
@@ -53,6 +53,9 @@ def test_skip():
assert not build_selector("pp36-manylinux_x86_64")
assert build_selector("pp37-manylinux_x86_64")
+ assert build_selector("pp38-manylinux_x86_64")
+ assert build_selector("pp37-manylinux_i686")
+ assert build_selector("pp38-manylinux_i686")
assert build_selector("cp36-manylinux_x86_64")
assert build_selector("cp37-manylinux_x86_64")
assert not build_selector("cp36-manylinux_i686")
diff --git a/unit_test/option_prepare_test.py b/unit_test/option_prepare_test.py
--- a/unit_test/option_prepare_test.py
+++ b/unit_test/option_prepare_test.py
@@ -11,7 +11,7 @@
from cibuildwheel import linux, util
from cibuildwheel.__main__ import main
-ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "pp37"}
+ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "pp37", "pp38"}
@pytest.fixture
@@ -132,7 +132,9 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
assert not kwargs["docker"]["simulate_32_bit"]
identifiers = {x.identifier for x in kwargs["platform_configs"]}
- assert identifiers == {f"{x}-manylinux_x86_64" for x in ALL_IDS - {"cp36", "cp310", "pp37"}}
+ assert identifiers == {
+ f"{x}-manylinux_x86_64" for x in ALL_IDS - {"cp36", "cp310", "pp37", "pp38"}
+ }
assert kwargs["options"].build_options("cp37-manylinux_x86_64").before_all == ""
kwargs = build_on_docker.call_args_list[2][1]
@@ -140,7 +142,11 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
assert kwargs["docker"]["cwd"] == Path("/project")
assert not kwargs["docker"]["simulate_32_bit"]
identifiers = {x.identifier for x in kwargs["platform_configs"]}
- assert identifiers == {"cp310-manylinux_x86_64", "pp37-manylinux_x86_64"}
+ assert identifiers == {
+ "cp310-manylinux_x86_64",
+ "pp37-manylinux_x86_64",
+ "pp38-manylinux_x86_64",
+ }
kwargs = build_on_docker.call_args_list[3][1]
assert "quay.io/pypa/manylinux2010_i686" in kwargs["docker"]["docker_image"]
| feat: use feature in-tree-build for pip
Closes #696
| FYI @henryiii, I was curious if this was now available with our latest dep updates, so I merged `main`.
I think that it does work, but the dependency-versions test pins pip to too old a version.
The latest pip (21.2) should also fix the issue with URIs not working, so we could update. Since this will be the new default, should we just force it in our next minor release, or wait till we get pip 21.3?
@joerick, @mayeut, or @YannickJadoul should we do this, or just wait till 21.3?
I'm not sure we should do anything about it except maybe updating the docs.
Let's keep pip's default behavior and allow users to change it by passing the `PIP_USE_FEATURE` environment variable.
With this PR, we know all tests are passing with the option.
I agree, we don't need to rush this, let's just wait for pip 21.3. I think that `PIP_USE_DEPRECATED=out-of-tree-build` will be available as a temporary fallback for users hitting this change unexpectedly.
Let's try to get 21.3 into the next version. manylinux will get it on Monday, or hopefully it can be triggered manually sooner (@mayeut?).
https://github.com/pypa/manylinux/pull/1207
@henryiii, I've triggered the workflows for manylinux as you've seen.
I won't be available until November 6th after that, so I don't expect anything to happen there, & I didn't have time to review other PRs here yet. | 2021-10-18T06:11:15 |
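For reference, a hedged sketch of opting in from a project's side before pip 21.3: pip's `--use-feature` flag can also be supplied through the `PIP_USE_FEATURE` environment variable mentioned above (the package path and wheel directory here are illustrative).

```python
import os
import subprocess

env = os.environ.copy()
env["PIP_USE_FEATURE"] = "in-tree-build"  # selects the feature that pip 21.3 makes the default

subprocess.run(
    ["python", "-m", "pip", "wheel", ".", "--no-deps", "--wheel-dir", "wheelhouse"],
    check=True,
    env=env,
)
```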
pypa/cibuildwheel | 889 | pypa__cibuildwheel-889 | [
"884"
] | 2710202f18d8f054ddc9faa048722db878cd3152 | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -48,14 +48,56 @@
)
+def format_safe(template: str, **kwargs: Any) -> str:
+ """
+ Works similarly to `template.format(**kwargs)`, except that unmatched
+ fields in `template` are passed through untouched.
+
+ >>> format_safe('{a} {b}', a='123')
+ '123 {b}'
+ >>> format_safe('{a} {b[4]:3f}', a='123')
+ '123 {b[4]:3f}'
+
+ To avoid variable expansion, precede with a single backslash e.g.
+ >>> format_safe('\\{a} {b}', a='123')
+ '{a} {b}'
+ """
+
+ result = template
+
+ for key, value in kwargs.items():
+ find_pattern = re.compile(
+ fr"""
+ (?<!\#) # don't match if preceded by a hash
+ {{ # literal open curly bracket
+ {re.escape(key)} # the field name
+ }} # literal close curly bracket
+ """,
+ re.VERBOSE,
+ )
+
+ # we use a lambda for repl to prevent re.sub interpreting backslashes
+ # in repl as escape sequences
+ result = re.sub(
+ pattern=find_pattern,
+ repl=lambda _: str(value),
+ string=result,
+ )
+
+ # transform escaped sequences into their literal equivalents
+ result = result.replace(f"#{{{key}}}", f"{{{key}}}")
+
+ return result
+
+
def prepare_command(command: str, **kwargs: PathOrStr) -> str:
"""
Preprocesses a command by expanding variables like {python}.
For example, used in the test_command option to specify the path to the
- project's root.
+ project's root. Unmatched syntax will mostly be allowed through.
"""
- return command.format(python="python", pip="pip", **kwargs)
+ return format_safe(command, python="python", pip="pip", **kwargs)
def get_build_verbosity_extra_flags(level: int) -> List[str]:
| diff --git a/unit_test/utils_test.py b/unit_test/utils_test.py
new file mode 100644
--- /dev/null
+++ b/unit_test/utils_test.py
@@ -0,0 +1,48 @@
+from cibuildwheel.util import format_safe, prepare_command
+
+
+def test_format_safe():
+ assert format_safe("{wheel}", wheel="filename.whl") == "filename.whl"
+ assert format_safe("command #{wheel}", wheel="filename.whl") == "command {wheel}"
+ assert format_safe("{command #{wheel}}", wheel="filename.whl") == "{command {wheel}}"
+
+ # check unmatched brackets
+ assert format_safe("{command {wheel}", wheel="filename.whl") == "{command filename.whl"
+
+ # check positional-style arguments i.e. {}
+ assert (
+ format_safe("find . -name * -exec ls -a {} \\;", project="/project")
+ == "find . -name * -exec ls -a {} \\;"
+ )
+
+ assert format_safe("{param} {param}", param="1") == "1 1"
+ assert format_safe("# {param} {param}", param="1") == "# 1 1"
+ assert format_safe("#{not_a_param} {param}", param="1") == "#{not_a_param} 1"
+
+
+def test_prepare_command():
+ assert prepare_command("python -m {project}", project="project") == "python -m project"
+ assert prepare_command("python -m {something}", project="project") == "python -m {something}"
+ assert (
+ prepare_command("python -m {something.abc}", project="project")
+ == "python -m {something.abc}"
+ )
+
+ assert (
+ prepare_command("python -m {something.abc[4]:3f}", project="project")
+ == "python -m {something.abc[4]:3f}"
+ )
+
+ # test backslashes in the replacement
+ assert (
+ prepare_command(
+ "command {wheel} \\Users\\Temp\\output_dir", wheel="\\Temporary Files\\cibw"
+ )
+ == "command \\Temporary Files\\cibw \\Users\\Temp\\output_dir"
+ )
+
+ # test some unusual syntax that used to trip up the str.format approach
+ assert (
+ prepare_command("{a}{a,b}{b:.2e}{c}{d%s}{e:3}{f[0]}", a="42", b="3.14159")
+ == "42{a,b}{b:.2e}{c}{d%s}{e:3}{f[0]}"
+ )
| fix: pass through unexpected format strings
Closes #840.
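Illustrative usage of the behaviour this patch targets, using `prepare_command` from the diff above (the wheel filename is made up): known placeholders are substituted, while unknown ones and bare `{}` pass through untouched.

```python
from cibuildwheel.util import prepare_command

assert (
    prepare_command("pip install {wheel}", wheel="spam-0.1.0-py3-none-any.whl")
    == "pip install spam-0.1.0-py3-none-any.whl"
)
# placeholders cibuildwheel doesn't know about are left alone instead of raising KeyError
assert (
    prepare_command("find . -exec ls {} \\;", project="/project")
    == "find . -exec ls {} \\;"
)
```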
| 2021-10-23T13:50:25 |
|
pypa/cibuildwheel | 906 | pypa__cibuildwheel-906 | [
"904"
] | 18ba7700f6045d7725592d206e58f5a568191542 | diff --git a/cibuildwheel/docker_container.py b/cibuildwheel/docker_container.py
--- a/cibuildwheel/docker_container.py
+++ b/cibuildwheel/docker_container.py
@@ -1,6 +1,7 @@
import io
import json
import os
+import platform
import shlex
import subprocess
import sys
@@ -9,6 +10,8 @@
from types import TracebackType
from typing import IO, Dict, List, Optional, Sequence, Type, cast
+from cibuildwheel.util import CIProvider, detect_ci_provider
+
from .typing import PathOrStr, PopenBytes
@@ -44,6 +47,15 @@ def __init__(
def __enter__(self) -> "DockerContainer":
self.name = f"cibuildwheel-{uuid.uuid4()}"
cwd_args = ["-w", str(self.cwd)] if self.cwd else []
+
+ # work-around for Travis-CI PPC64le Docker runs since 2021:
+ # this avoids network splits
+ # https://github.com/pypa/cibuildwheel/issues/904
+ # https://github.com/conda-forge/conda-smithy/pull/1520
+ network_args = []
+ if detect_ci_provider() == CIProvider.travis_ci and platform.machine() == "ppc64le":
+ network_args = ["--network=host"]
+
shell_args = ["linux32", "/bin/bash"] if self.simulate_32_bit else ["/bin/bash"]
subprocess.run(
[
@@ -53,6 +65,7 @@ def __enter__(self) -> "DockerContainer":
f"--name={self.name}",
"--interactive",
"--volume=/:/host", # ignored on CircleCI
+ *network_args,
*cwd_args,
self.docker_image,
*shell_args,
| Travis ppc64le network issue work-around
### Description
Hi,
I currently see network issues with docker containers on Travis CI ppc64le builders.
Essentially, connections seem to time out, e.g., on `yum update`.
The same problem seems to be known in conda-forge and adding `--network=host` to `docker run` seems to solve the issue.
Do you know how I can most simply apply the same work-around in cibuildwheel?
X-ref:
- https://github.com/conda-forge/conda-smithy/pull/1520
### Build log
X-ref:
- https://github.com/openPMD/openPMD-api/pull/1136
### CI config
- https://github.com/openPMD/openPMD-api/tree/wheels
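For context, a minimal sketch of the work-around that the patch above applies; the CI detection here is simplified to the `TRAVIS` environment variable (an assumption for illustration), whereas the real code uses cibuildwheel's `detect_ci_provider`.

```python
import os
import platform
from typing import List


def docker_network_args() -> List[str]:
    on_travis = os.environ.get("TRAVIS") == "true"  # simplified detection (assumption)
    if on_travis and platform.machine() == "ppc64le":
        # host networking avoids the network splits seen on Travis ppc64le
        return ["--network=host"]
    return []


# e.g. docker_args = ["docker", "create", *docker_network_args(), image_name]
```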
| 2021-11-04T18:01:03 |
||
pypa/cibuildwheel | 917 | pypa__cibuildwheel-917 | [
"915"
] | 823d4af1ad29ce0d79dfb536c74fb4940b89f76e | diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -97,6 +97,10 @@ def install_cpython(version: str, arch: str, nuget: Path) -> Path:
nuget_args = get_nuget_args(version, arch)
installation_path = Path(nuget_args[-1]) / (nuget_args[0] + "." + version) / "tools"
call([nuget, "install", *nuget_args])
+ # "python3" is not included in the vanilla nuget package,
+ # though it can be present if modified (like on Azure).
+ if not (installation_path / "python3.exe").exists():
+ (installation_path / "python3.exe").symlink_to(installation_path / "python.exe")
return installation_path
| Windows: python and python3 don't point to the same installation
### Description
Hi guys!
I am building wheels with cibw, but noticed some issues with Windows builds, where packages installed during the `CIBW_BEFORE_BUILD` step would not show up as installed during the build step.
As it turns out, `python3` is not pointing to the installed Python version, but to the host's Python version (?).
Here is an example from the build log linked to, where `cp37` is used:
| cmd | path |
| --- | --- |
| `python3` | `/c/hostedtoolcache/windows/Python/3.10.0/x64/python3` |
| `python` | `/c/cibw/python/python.3.7.9/tools/python` |
This differs from the behavior in linux and macos, where both `python` and `python3` point to the _same_ installation, examples below.
| platform | cmd | path |
| --- | --- | --- |
| linux | `python3` | `/opt/python/cp310-cp310/bin/python3` |
| linux | `python` | `/opt/python/cp310-cp310/bin/python` |
| macos | `python3` | `/Library/Frameworks/Python.framework/Versions/3.7/bin/python3` |
| macos | `python` | `/tmp/cibw_bin/python` (which looks like a link to the version above) |
It would be great if `python3` on Windows builds also pointed to the installed CPython version. What do you think?
### Build log
https://github.com/lukeparser/lukeparser/runs/4182800043?check_suite_focus=true
### CI config
https://github.com/lukeparser/lukeparser/blob/88fcaf2de9599c62fffd26c4f85fee99c17278ec/.github/workflows/cibuildwheel.yml#L100
| On a second note, cibuildwheel explicitly calls `python -m pip wheel ...` - on every platform, see [windows.py#L196](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/windows.py#L196), [macos.py#L224](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/macos.py#L224), [linux.py#L198](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/linux.py#L198) - which may not be the python version/installation cibuildwheel is invoked with (e.g. `python3 -m cibuildwheel ...`).
As a user I would expect that cibuildwheel would use the same one.
A few sanity checks are performed to ensure the installed cpython version matches the one `python` links to (see [linux.py#L164](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/linux.py#L164), [macos.py#L208](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/macos.py#L208), [windows.py#L178](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/windows.py#L178)).
Here, a check that the Python cibuildwheel was invoked from matches the `python` actually used would be very useful to the user, too!
On note one, that looks like a bug; we should be making sure both python and python3 are valid (and we do that on macOS, it seems, and I know manylinux does this, just not Windows). I'm a little surprised it's not there, but we should add it. How do you do that on Windows? I know symlinking isn't always available on Windows.
On your second note, you never want cibuildwheel to use the host Python. That is only used to call cibuildwheel; there are no special requirements on it; on macOS, it probably will always be an unacceptable version of Python to build with (only official downloads have the proper compilation settings), and besides, it's not even related to the version you are trying to build. Python 3.10 host can build wheels for all versions of Python, it's not using the host Python 3.10.
Yes, you are right. For a moment I forgot that there is another layer of abstraction in there. Sorry for the confusion!
But, the python used in `before_all` or `before_build` can differ from the one used by cibw, which can go unnoticed. A check here would be nice, but cumbersome to implement, I guess.
Back to the original issue: nuget only installs `python.exe`; there is a [note](https://docs.python.org/3/using/windows.html#the-microsoft-store-package) for the Microsoft Store package, which installs commands with version-number suffixes, but other than that ...
I know there are some issues with symlinking on windows, so can we just copy `python.exe` and call it `python3.exe`?
Thanks for the detailed bug report, @sbrodehl ! I agree, this looks like a bug on Windows. I think that Windows doesn't normally have the python3 alias, but if runners have it available on PATH, that's gonna cause issues.
We already do symlinking with the pypy install on windows to make `pypy3.exe` available as `python.exe`,
https://github.com/pypa/cibuildwheel/blob/08a694485ded60cad4680897874fcf4a940a29c2/cibuildwheel/windows.py#L115
so I expect `Path.symlink_to()` should work. | 2021-11-12T19:59:01 |
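A hedged sketch of the approach discussed above; the merged patch uses `Path.symlink_to`, while the copy fallback is an extra assumption for environments where creating symlinks requires elevated privileges.

```python
import shutil
from pathlib import Path


def ensure_python3_exe(installation_path: Path) -> None:
    python3 = installation_path / "python3.exe"
    if python3.exists():
        return
    try:
        python3.symlink_to(installation_path / "python.exe")
    except OSError:
        # symlink creation can be restricted on Windows, so fall back to a plain copy
        shutil.copy(installation_path / "python.exe", python3)
```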
|
pypa/cibuildwheel | 926 | pypa__cibuildwheel-926 | [
"909"
] | 1176024473010e400598a088ded069f36422b1ba | diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -469,10 +469,8 @@ def build_options(self, identifier: Optional[str]) -> BuildOptions:
)
if not config_value:
- # default to manylinux2010 if it's available, otherwise manylinux2014
- image = pinned_images.get("manylinux2010") or pinned_images.get(
- "manylinux2014"
- )
+ # default to manylinux2014
+ image = pinned_images.get("manylinux2014")
elif config_value in pinned_images:
image = pinned_images[config_value]
else:
| diff --git a/test/test_docker_images.py b/test/test_docker_images.py
--- a/test/test_docker_images.py
+++ b/test/test_docker_images.py
@@ -35,8 +35,8 @@ def test(tmp_path):
actual_wheels = utils.cibuildwheel_run(
project_dir,
add_env={
- "CIBW_MANYLINUX_X86_64_IMAGE": "dockcross/manylinux2010-x64",
- "CIBW_MANYLINUX_I686_IMAGE": "dockcross/manylinux2010-x86",
+ "CIBW_MANYLINUX_X86_64_IMAGE": "dockcross/manylinux2014-x64",
+ "CIBW_MANYLINUX_I686_IMAGE": "dockcross/manylinux2014-x86",
"CIBW_BUILD": "cp3{6,7,8,9}-manylinux*",
},
)
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -120,7 +120,12 @@ def expected_wheels(
if manylinux_versions is None:
if machine_arch == "x86_64":
- manylinux_versions = ["manylinux_2_5", "manylinux1", "manylinux_2_12", "manylinux2010"]
+ manylinux_versions = [
+ "manylinux_2_5",
+ "manylinux1",
+ "manylinux_2_17",
+ "manylinux2014",
+ ]
else:
manylinux_versions = ["manylinux_2_17", "manylinux2014"]
diff --git a/unit_test/main_tests/main_options_test.py b/unit_test/main_tests/main_options_test.py
--- a/unit_test/main_tests/main_options_test.py
+++ b/unit_test/main_tests/main_options_test.py
@@ -71,19 +71,19 @@ def test_empty_selector(platform, intercepted_build_args, monkeypatch):
@pytest.mark.parametrize(
"architecture, image, full_image",
[
- ("x86_64", None, "quay.io/pypa/manylinux2010_x86_64:*"),
+ ("x86_64", None, "quay.io/pypa/manylinux2014_x86_64:*"),
("x86_64", "manylinux1", "quay.io/pypa/manylinux1_x86_64:*"),
("x86_64", "manylinux2010", "quay.io/pypa/manylinux2010_x86_64:*"),
("x86_64", "manylinux2014", "quay.io/pypa/manylinux2014_x86_64:*"),
("x86_64", "manylinux_2_24", "quay.io/pypa/manylinux_2_24_x86_64:*"),
("x86_64", "custom_image", "custom_image"),
- ("i686", None, "quay.io/pypa/manylinux2010_i686:*"),
+ ("i686", None, "quay.io/pypa/manylinux2014_i686:*"),
("i686", "manylinux1", "quay.io/pypa/manylinux1_i686:*"),
("i686", "manylinux2010", "quay.io/pypa/manylinux2010_i686:*"),
("i686", "manylinux2014", "quay.io/pypa/manylinux2014_i686:*"),
("i686", "manylinux_2_24", "quay.io/pypa/manylinux_2_24_i686:*"),
("i686", "custom_image", "custom_image"),
- ("pypy_x86_64", None, "quay.io/pypa/manylinux2010_x86_64:*"),
+ ("pypy_x86_64", None, "quay.io/pypa/manylinux2014_x86_64:*"),
("pypy_x86_64", "manylinux1", "manylinux1"), # Does not exist
("pypy_x86_64", "manylinux2010", "quay.io/pypa/manylinux2010_x86_64:*"),
("pypy_x86_64", "manylinux2014", "quay.io/pypa/manylinux2014_x86_64:*"),
diff --git a/unit_test/option_prepare_test.py b/unit_test/option_prepare_test.py
--- a/unit_test/option_prepare_test.py
+++ b/unit_test/option_prepare_test.py
@@ -52,7 +52,7 @@ def test_build_default_launches(mock_build_docker, fake_package_dir, monkeypatch
# In Python 3.8+, this can be simplified to [0].kwargs
kwargs = build_on_docker.call_args_list[0][1]
- assert "quay.io/pypa/manylinux2010_x86_64" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux2014_x86_64" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert not kwargs["docker"]["simulate_32_bit"]
@@ -60,7 +60,7 @@ def test_build_default_launches(mock_build_docker, fake_package_dir, monkeypatch
assert identifiers == {f"{x}-manylinux_x86_64" for x in ALL_IDS}
kwargs = build_on_docker.call_args_list[1][1]
- assert "quay.io/pypa/manylinux2010_i686" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux2014_i686" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert kwargs["docker"]["simulate_32_bit"]
@@ -94,13 +94,13 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
cibw_toml.write_text(
"""
[tool.cibuildwheel]
-manylinux-x86_64-image = "manylinux2014"
+manylinux-x86_64-image = "manylinux_2_24"
-# Before Python 3.10, manylinux2010 is the most compatible
+# Before Python 3.10, use manylinux2014
[[tool.cibuildwheel.overrides]]
select = "cp3?-*"
-manylinux-x86_64-image = "manylinux2010"
-manylinux-i686-image = "manylinux2010"
+manylinux-x86_64-image = "manylinux2014"
+manylinux-i686-image = "manylinux2014"
[[tool.cibuildwheel.overrides]]
select = "cp36-manylinux_x86_64"
@@ -118,7 +118,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
assert build_on_docker.call_count == 6
kwargs = build_on_docker.call_args_list[0][1]
- assert "quay.io/pypa/manylinux2010_x86_64" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux2014_x86_64" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert not kwargs["docker"]["simulate_32_bit"]
@@ -127,7 +127,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
assert kwargs["options"].build_options("cp36-manylinux_x86_64").before_all == "true"
kwargs = build_on_docker.call_args_list[1][1]
- assert "quay.io/pypa/manylinux2010_x86_64" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux2014_x86_64" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert not kwargs["docker"]["simulate_32_bit"]
@@ -138,7 +138,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
assert kwargs["options"].build_options("cp37-manylinux_x86_64").before_all == ""
kwargs = build_on_docker.call_args_list[2][1]
- assert "quay.io/pypa/manylinux2014_x86_64" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux_2_24_x86_64" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert not kwargs["docker"]["simulate_32_bit"]
identifiers = {x.identifier for x in kwargs["platform_configs"]}
@@ -149,7 +149,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
}
kwargs = build_on_docker.call_args_list[3][1]
- assert "quay.io/pypa/manylinux2010_i686" in kwargs["docker"]["docker_image"]
+ assert "quay.io/pypa/manylinux2014_i686" in kwargs["docker"]["docker_image"]
assert kwargs["docker"]["cwd"] == Path("/project")
assert kwargs["docker"]["simulate_32_bit"]
diff --git a/unit_test/options_toml_test.py b/unit_test/options_toml_test.py
--- a/unit_test/options_toml_test.py
+++ b/unit_test/options_toml_test.py
@@ -60,7 +60,7 @@ def test_simple_settings(tmp_path, platform, fname):
)
assert options_reader.get("manylinux-x86_64-image") == "manylinux1"
- assert options_reader.get("manylinux-i686-image") == "manylinux2010"
+ assert options_reader.get("manylinux-i686-image") == "manylinux2014"
with pytest.raises(ConfigOptionError):
options_reader.get("environment", sep=" ")
@@ -71,7 +71,7 @@ def test_simple_settings(tmp_path, platform, fname):
def test_envvar_override(tmp_path, platform, monkeypatch):
monkeypatch.setenv("CIBW_BUILD", "cp38*")
- monkeypatch.setenv("CIBW_MANYLINUX_X86_64_IMAGE", "manylinux2014")
+ monkeypatch.setenv("CIBW_MANYLINUX_X86_64_IMAGE", "manylinux_2_24")
monkeypatch.setenv("CIBW_TEST_COMMAND", "mytest")
monkeypatch.setenv("CIBW_TEST_REQUIRES", "docs")
monkeypatch.setenv("CIBW_TEST_REQUIRES_LINUX", "scod")
@@ -84,8 +84,8 @@ def test_envvar_override(tmp_path, platform, monkeypatch):
assert options_reader.get("archs", sep=" ") == "auto"
assert options_reader.get("build", sep=" ") == "cp38*"
- assert options_reader.get("manylinux-x86_64-image") == "manylinux2014"
- assert options_reader.get("manylinux-i686-image") == "manylinux2010"
+ assert options_reader.get("manylinux-x86_64-image") == "manylinux_2_24"
+ assert options_reader.get("manylinux-i686-image") == "manylinux2014"
assert (
options_reader.get("test-requires", sep=" ")
@@ -222,7 +222,7 @@ def test_environment_override_empty(tmp_path, monkeypatch):
assert options_reader.get("manylinux-i686-image") == ""
assert options_reader.get("manylinux-aarch64-image") == "manylinux1"
- assert options_reader.get("manylinux-x86_64-image", ignore_empty=True) == "manylinux2010"
+ assert options_reader.get("manylinux-x86_64-image", ignore_empty=True) == "manylinux2014"
assert options_reader.get("manylinux-i686-image", ignore_empty=True) == "manylinux1"
assert options_reader.get("manylinux-aarch64-image", ignore_empty=True) == "manylinux1"
| Move the default for PyPy to manylinux2014
@mattip suggested moving to manylinux2014, since pypy is looking at dropping manylinux2010 in the future. cibuildwheel 2.3 (which will add pypy3.8) might be a good place to do it.
| One thought; when do we plan to move from manylinux2010 to manylinux2014 as the default for everything? manylinux2014 is already a much better choice (#901) for Python 3.10, NumPy doesn't even bother providing 2010 wheels for it, and that would avoid two docker launches if someone left the default alone. I think most users pick the manylinux image they want to use, anyway; usually because they want manylinux1.
@joerick (and @mayeut and @YannickJadoul if you have any opinions here) what do you think? Should we bump PyPy to 2014 by default? And/or all Pythons? Due to the fact that 2014 is the best choice for Python 3.10, I'm mildly in favor of bumping all defaults to 2014.
I don't have a strong opinion on this, so I'd defer to @mattip - happy to bump the PyPy default to 2014.
> One thought; when do we plan to move from manylinux2010 to manylinux2014 as the default for everything?
An excellent question, for which I don't have a firm answer! I think it's fairly reasonable to lag here, since most packagers will normally want to build on the oldest (most compatible) image that works for their package (assuming the image is still maintained/secure).
With the default being something old, either they'll succeed on the first build, which gives them the most compatible wheel, or they'll keep trying newer and newer images until it works.
Do we fully understand the nature of the 3.10 problems you mention? I've [not had any trouble](https://github.com/joerick/pyinstrument/runs/4203588010?check_suite_focus=true) building 3.10 wheels on manylinux2010.
I think #901 was hitting an issue with manylinux2010, but mostly, since any pip that supports Python 3.10 is new enough to understand manylinux2014, there's not much reason to use a past-EOL distribution to build images. NumPy does not provide manylinux2010 wheels, only 2014, so anyone using NumPy will have to try to build NumPy from scratch, etc. If someone doesn't know what they are doing, they are more likely to hit issues with manylinux2010 than with 2014 for Python 3.10.
Pinning 3.10 to manylinux2014 will take a bit of refactoring, since currently the [pinned_docker_images.cfg](https://github.com/pypa/cibuildwheel/blob/main/cibuildwheel/resources/pinned_docker_images.cfg) only relates to platform, not to particular combinations of platform and Python version.
I think we should just pin everything to 2014, simpler for consistency and users, and anyone wanting 2010 would just manually pin it, just like they do already with manylinux1. The slice of users serviced by 2010 is really pretty small; most users are stuck on pip 9 (manylinux1) or they have a pip from the last few years and are fine with manylinux2014. See the orange slices in https://github.com/pypa/manylinux/issues/994#issuecomment-792210874 - Python 3.7 is the only one with a significant number of manylinux2010 as a maximum (3.6 is stuck with Pip 9 more often). The orange is always less than manylinux1, so it's less harsh than the manylinux1 -> manylinux2010 that we already default to.
Another way to see it: the "Policy readiness for python 3.*" plots at https://mayeut.github.io/manylinux-timeline/. There's a small sliver for 2010 for 3.6, a wider but shrinking sliver for 2010 in 3.7, and it's simply too small to really see on any newer version.
> With the default being something old, either they'll succeed on the first build, which gives them the most compatible wheel, or they'll keep trying newer and newer images until it works.
This is not how we handle our current selection; we've been on manylinux2010 for years, and by this logic, we should default to manylinux1 (and last I checked, 70% of our packages did exactly this). I think the logic is (and should be) that we select a recommended default that a "basic" user should be happy with, and allow an advanced user to select whatever they want - if they really need extra compatibility, they can set manylinux2010 (or manylinux1, as they do now). A small, "normal" package should be happy with manylinux2014 (at least if it was with manylinux2010) - and manylinux2010 is based on a past-EOL operating system.
The reason for manylinux2014 is that it's now a significantly better choice for Python 3.10, but also, it's consistent - aarch64 and family all use 2014 already, since that's the earliest version that supported them, and now PyPy will be 2014 too; I think it's easier to explain if we just say 2014 is the default across the board. We even save a docker launch if pypy and cpython match. | 2021-11-18T15:39:01 |
pypa/cibuildwheel | 956 | pypa__cibuildwheel-956 | [
"942"
] | 52f7801799043bd5a734617925c5b9542ef9c81d | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -192,24 +192,10 @@ def setup_python(
# https://github.com/pypa/virtualenv/issues/620
# Also see https://github.com/python/cpython/pull/9516
env.pop("__PYVENV_LAUNCHER__", None)
- env = environment.as_dictionary(prev_environment=env)
# we version pip ourselves, so we don't care about pip version checking
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
- # check what version we're on
- call(["which", "python"], env=env)
- call(["python", "--version"], env=env)
- which_python = subprocess.run(
- ["which", "python"], env=env, universal_newlines=True, check=True, stdout=subprocess.PIPE
- ).stdout.strip()
- if which_python != "/tmp/cibw_bin/python":
- print(
- "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
- file=sys.stderr,
- )
- sys.exit(1)
-
# Install pip
requires_reinstall = not (installation_bin_path / "pip").exists()
@@ -233,6 +219,10 @@ def setup_python(
cwd="/tmp",
)
+ # Apply our environment after pip is ready
+ env = environment.as_dictionary(prev_environment=env)
+
+ # check what pip version we're on
assert (installation_bin_path / "pip").exists()
call(["which", "pip"], env=env)
call(["pip", "--version"], env=env)
@@ -246,6 +236,19 @@ def setup_python(
)
sys.exit(1)
+ # check what Python version we're on
+ call(["which", "python"], env=env)
+ call(["python", "--version"], env=env)
+ which_python = subprocess.run(
+ ["which", "python"], env=env, universal_newlines=True, check=True, stdout=subprocess.PIPE
+ ).stdout.strip()
+ if which_python != "/tmp/cibw_bin/python":
+ print(
+ "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
# Set MACOSX_DEPLOYMENT_TARGET to 10.9, if the user didn't set it.
# PyPy defaults to 10.7, causing inconsistencies if it's left unset.
env.setdefault("MACOSX_DEPLOYMENT_TARGET", "10.9")
diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -157,31 +157,6 @@ def setup_python(
)
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
- # update env with results from CIBW_ENVIRONMENT
- env = environment.as_dictionary(prev_environment=env)
-
- # for the logs - check we're running the right version of python
- call(["where", "python"], env=env)
- call(["python", "--version"], env=env)
- call(["python", "-c", "\"import struct; print(struct.calcsize('P') * 8)\""], env=env)
- where_python = (
- subprocess.run(
- ["where", "python"],
- env=env,
- universal_newlines=True,
- check=True,
- stdout=subprocess.PIPE,
- )
- .stdout.splitlines()[0]
- .strip()
- )
- if where_python != str(installation_path / "python.exe"):
- print(
- "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
- file=sys.stderr,
- )
- sys.exit(1)
-
log.step("Installing build tools...")
# Install pip
@@ -230,6 +205,32 @@ def setup_python(
cwd=CIBW_INSTALL_PATH,
)
+ # update env with results from CIBW_ENVIRONMENT
+ env = environment.as_dictionary(prev_environment=env)
+
+ # check what Python version we're on
+ call(["where", "python"], env=env)
+ call(["python", "--version"], env=env)
+ call(["python", "-c", "\"import struct; print(struct.calcsize('P') * 8)\""], env=env)
+ where_python = (
+ subprocess.run(
+ ["where", "python"],
+ env=env,
+ universal_newlines=True,
+ check=True,
+ stdout=subprocess.PIPE,
+ )
+ .stdout.splitlines()[0]
+ .strip()
+ )
+ if where_python != str(installation_path / "python.exe"):
+ print(
+ "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ # check what pip version we're on
assert (installation_path / "Scripts" / "pip.exe").exists()
where_pip = (
subprocess.run(
| Setting PIP options can break environment setup
### Description
Setting a pip option like `PIP_USE_DEPRECATED=out-of-tree-build` can break the initial setup, because the option gets applied to the original pip version (21.1 on Windows) instead of our pinned version. We should possibly ignore `PIP_*` variables, or all environment variables, during this initial setup of the pinned tools.
See https://github.com/pybind/cmake_example/pull/66
Example of problematic line: https://github.com/pypa/cibuildwheel/blob/f717468ff60fbd2ee76b6327596d078ad0ec13d5/cibuildwheel/macos.py#L232
### Build log
https://github.com/pybind/cmake_example/runs/4346290813?check_suite_focus=true
### CI config
https://github.com/pybind/cmake_example/blob/c06fc016af76bd0f6a637705afc5d4cbfa554f5a/.github/workflows/wheels.yml#L49
| Hmm yes, I can see the argument. We could also defer applying CIBW_ENVIRONMENT until after our internal tools install? Not sure which is preferable though.
It would be simpler to defer till after the internal tools install, since we set a few pip variables (like the version check). | 2021-12-09T16:26:59 |
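A minimal sketch of the ordering the fix settles on (the pip pin and commands are illustrative, not cibuildwheel's real values): bootstrap the pinned pip with only cibuildwheel's own settings, then apply the user's `CIBW_ENVIRONMENT` so that `PIP_*` options only affect the new pip.

```python
import os
import subprocess

base_env = os.environ.copy()
base_env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"

# 1. upgrade to the pinned pip before any user-supplied PIP_* options can apply
subprocess.run(
    ["python", "-m", "pip", "install", "--upgrade", "pip==21.3.1"],  # pin is illustrative
    check=True,
    env=base_env,
)

# 2. only now layer the user's environment on top (here, the option from the report)
user_env = dict(base_env, PIP_USE_DEPRECATED="out-of-tree-build")
subprocess.run(["python", "-m", "pip", "--version"], check=True, env=user_env)
```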
|
pypa/cibuildwheel | 975 | pypa__cibuildwheel-975 | [
"734"
] | d6bede539df95695af71f5834c9c8d3fc99c8efd | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -12,7 +12,7 @@
from enum import Enum
from pathlib import Path
from time import sleep
-from typing import Any, Dict, Iterable, Iterator, List, Optional, TextIO
+from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Optional, TextIO
import bracex
import certifi
@@ -352,11 +352,27 @@ def print_new_wheels(msg: str, output_dir: Path) -> Iterator[None]:
existing_contents = set(output_dir.iterdir())
yield
final_contents = set(output_dir.iterdir())
- new_contents = final_contents - existing_contents
+
+ class FileReport(NamedTuple):
+ name: str
+ size: str
+
+ new_contents = [
+ FileReport(wheel.name, f"{(wheel.stat().st_size + 1023) // 1024:,d}")
+ for wheel in final_contents - existing_contents
+ ]
+ max_name_len = max(len(f.name) for f in new_contents)
+ max_size_len = max(len(f.size) for f in new_contents)
n = len(new_contents)
s = time.time() - start_time
m = s / 60
- print(msg.format(n=n, s=s, m=m), *sorted(f" {f.name}" for f in new_contents), sep="\n")
+ print(
+ msg.format(n=n, s=s, m=m),
+ *sorted(
+ f" {f.name:<{max_name_len}s} {f.size:>{max_size_len}s} kB" for f in new_contents
+ ),
+ sep="\n",
+ )
def get_pip_version(env: Dict[str, str]) -> str:
| diff --git a/unit_test/wheel_print_test.py b/unit_test/wheel_print_test.py
--- a/unit_test/wheel_print_test.py
+++ b/unit_test/wheel_print_test.py
@@ -6,15 +6,15 @@
def test_printout_wheels(tmp_path, capsys):
tmp_path.joinpath("example.0").touch()
with print_new_wheels("TEST_MSG: {n}", tmp_path):
- tmp_path.joinpath("example.1").touch()
- tmp_path.joinpath("example.2").touch()
+ tmp_path.joinpath("example.1").write_bytes(b"0" * 1023)
+ tmp_path.joinpath("example.2").write_bytes(b"0" * 1025)
captured = capsys.readouterr()
assert captured.err == ""
assert "example.0" not in captured.out
- assert "example.1\n" in captured.out
- assert "example.2\n" in captured.out
+ assert "example.1 1 kB\n" in captured.out
+ assert "example.2 2 kB\n" in captured.out
assert "TEST_MSG:" in captured.out
assert "TEST_MSG: 2\n" in captured.out
| feat: include size in the final printout
Would it be helpful to include the size in the final wheel printout? I find myself checking things like this:
```
11 wheels produced in 14 minutes:
cmake_example-0.0.1-cp27-cp27m-macosx_10_9_x86_64.whl
cmake_example-0.0.1-cp35-cp35m-macosx_10_9_x86_64.whl
cmake_example-0.0.1-cp36-cp36m-macosx_10_9_x86_64.whl
cmake_example-0.0.1-cp37-cp37m-macosx_10_9_x86_64.whl
cmake_example-0.0.1-cp38-cp38-macosx_10_9_universal2.whl
cmake_example-0.0.1-cp38-cp38-macosx_10_9_x86_64.whl
cmake_example-0.0.1-cp39-cp39-macosx_10_9_universal2.whl
cmake_example-0.0.1-cp39-cp39-macosx_10_9_x86_64.whl
cmake_example-0.0.1-pp27-pypy_73-macosx_10_9_x86_64.whl
cmake_example-0.0.1-pp36-pypy36_pp73-macosx_10_9_x86_64.whl
cmake_example-0.0.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl
```
And wondering if the Universal2 part worked. If the size was listed at the end, that would help.
| I think that it will be helpful.
Agree! | 2021-12-29T17:11:13 |
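For illustration, the size formatting used in the patch above: integer ceil-division to whole kB with a thousands separator.

```python
def human_kb(num_bytes: int) -> str:
    return f"{(num_bytes + 1023) // 1024:,d} kB"


assert human_kb(1023) == "1 kB"
assert human_kb(1025) == "2 kB"
assert human_kb(3_210_000) == "3,135 kB"
```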
pypa/cibuildwheel | 977 | pypa__cibuildwheel-977 | [
"976"
] | d6bede539df95695af71f5834c9c8d3fc99c8efd | diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -70,7 +70,7 @@ def get_requires_python_str(package_dir: Path) -> Optional[str]:
pass
try:
- with (package_dir / "setup.py").open() as f2:
+ with (package_dir / "setup.py").open(encoding="utf8") as f2:
return setup_py_python_requires(f2.read())
except FileNotFoundError:
pass
| diff --git a/unit_test/projectfiles_test.py b/unit_test/projectfiles_test.py
--- a/unit_test/projectfiles_test.py
+++ b/unit_test/projectfiles_test.py
@@ -25,7 +25,7 @@ def test_read_setup_py_simple(tmp_path):
def test_read_setup_py_full(tmp_path):
- with open(tmp_path / "setup.py", "w") as f:
+ with open(tmp_path / "setup.py", "w", encoding="utf8") as f:
f.write(
dedent(
"""
@@ -35,6 +35,7 @@ def test_read_setup_py_full(tmp_path):
setuptools.setup(
name = "hello",
+ description = "≥“”ü",
other = 23,
example = ["item", "other"],
python_requires = "1.24",
@@ -43,7 +44,9 @@ def test_read_setup_py_full(tmp_path):
)
)
- assert setup_py_python_requires(tmp_path.joinpath("setup.py").read_text()) == "1.24"
+ assert (
+ setup_py_python_requires(tmp_path.joinpath("setup.py").read_text(encoding="utf8")) == "1.24"
+ )
assert get_requires_python_str(tmp_path) == "1.24"
| on windows, setup_py_python_requires attempts to open utf-8 setup.py as Windows-1252 and fails
### Description
This [setup.py file](https://github.com/fgregg/fastcluster/blob/master/setup.py) is valid UTF-8 and has a few non-ASCII characters. In a Windows build, `setup_py_python_requires` appears to open this file as if it were encoded as Windows-1252 and thus fails on some non-ASCII characters.
### Build log
https://github.com/fgregg/fastcluster/runs/4660766954?check_suite_focus=true#step:5:40
### CI config
https://github.com/fgregg/fastcluster/blob/master/.github/workflows/pythonpackage.yml#L41-L47
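A small sketch of the failure mode and the fix pattern (the helper name is illustrative): without an explicit encoding, `open()` on Windows uses the locale code page (often cp1252), so reading a UTF-8 `setup.py` containing non-ASCII characters can raise `UnicodeDecodeError`.

```python
from pathlib import Path


def read_setup_py(package_dir: Path) -> str:
    # passing the encoding explicitly makes the read independent of the Windows locale
    return (package_dir / "setup.py").read_text(encoding="utf-8")
```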
| 2021-12-29T18:45:18 |
|
pypa/cibuildwheel | 978 | pypa__cibuildwheel-978 | [
"672"
] | 73b7de0a7562578851b0ff232f736ddad9027e5c | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -1,13 +1,12 @@
import os
import platform
import re
-import shlex
import shutil
import subprocess
import sys
import tempfile
from pathlib import Path
-from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Set, Tuple, cast
+from typing import Any, Dict, List, NamedTuple, Sequence, Set, Tuple, cast
from .architecture import Architecture
from .environment import ParsedEnvironment
@@ -18,31 +17,18 @@
BuildFrontend,
BuildSelector,
NonPlatformWheelError,
+ call,
download,
get_build_verbosity_extra_flags,
get_pip_version,
install_certifi_script,
prepare_command,
read_python_configs,
+ shell,
unwrap,
)
-def call(
- args: Sequence[PathOrStr],
- env: Optional[Dict[str, str]] = None,
- cwd: Optional[str] = None,
- shell: bool = False,
-) -> None:
- # print the command executing for the logs
- if shell:
- print(f"+ {args}")
- else:
- print("+ " + " ".join(shlex.quote(str(a)) for a in args))
-
- subprocess.run(args, env=env, cwd=cwd, shell=shell, check=True)
-
-
def get_macos_version() -> Tuple[int, int]:
"""
Returns the macOS major/minor version, as a tuple, e.g. (10, 15) or (11, 0)
@@ -58,13 +44,7 @@ def get_macos_version() -> Tuple[int, int]:
def get_macos_sdks() -> List[str]:
- output = subprocess.run(
- ["xcodebuild", "-showsdks"],
- universal_newlines=True,
- check=True,
- stdout=subprocess.PIPE,
- ).stdout
-
+ output = call("xcodebuild", "-showsdks", capture_stdout=True)
return [m.group(1) for m in re.finditer(r"-sdk (macosx\S+)", output)]
@@ -114,9 +94,7 @@ def make_symlinks(installation_bin_path: Path, python_executable: str, pip_execu
def install_cpython(version: str, url: str) -> Path:
- installed_system_packages = subprocess.run(
- ["pkgutil", "--pkgs"], universal_newlines=True, check=True, stdout=subprocess.PIPE
- ).stdout.splitlines()
+ installed_system_packages = call("pkgutil", "--pkgs", capture_stdout=True).splitlines()
# if this version of python isn't installed, get it from python.org and install
python_package_identifier = f"org.python.Python.PythonFramework-{version}"
@@ -127,10 +105,10 @@ def install_cpython(version: str, url: str) -> Path:
# download the pkg
download(url, Path("/tmp/Python.pkg"))
# install
- call(["sudo", "installer", "-pkg", "/tmp/Python.pkg", "-target", "/"])
+ call("sudo", "installer", "-pkg", "/tmp/Python.pkg", "-target", "/")
env = os.environ.copy()
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
- call([str(installation_bin_path / python_executable), str(install_certifi_script)], env=env)
+ call(str(installation_bin_path / python_executable), str(install_certifi_script), env=env)
pip_executable = "pip3"
make_symlinks(installation_bin_path, python_executable, pip_executable)
@@ -147,7 +125,7 @@ def install_pypy(version: str, url: str) -> Path:
if not installation_path.exists():
downloaded_tar_bz2 = Path("/tmp") / pypy_tar_bz2
download(url, downloaded_tar_bz2)
- call(["tar", "-C", "/tmp", "-xf", downloaded_tar_bz2])
+ call("tar", "-C", "/tmp", "-xf", downloaded_tar_bz2)
installation_bin_path = installation_path / "bin"
python_executable = "pypy3"
@@ -203,20 +181,18 @@ def setup_python(
requires_reinstall = not (installation_bin_path / "pip").exists()
if requires_reinstall:
# maybe pip isn't installed at all. ensurepip resolves that.
- call(["python", "-m", "ensurepip"], env=env, cwd="/tmp")
+ call("python", "-m", "ensurepip", env=env, cwd="/tmp")
# upgrade pip to the version matching our constraints
# if necessary, reinstall it to ensure that it's available on PATH as 'pip'
call(
- [
- "python",
- "-m",
- "pip",
- "install",
- "--force-reinstall" if requires_reinstall else "--upgrade",
- "pip",
- *dependency_constraint_flags,
- ],
+ "python",
+ "-m",
+ "pip",
+ "install",
+ "--force-reinstall" if requires_reinstall else "--upgrade",
+ "pip",
+ *dependency_constraint_flags,
env=env,
cwd="/tmp",
)
@@ -226,11 +202,9 @@ def setup_python(
# check what pip version we're on
assert (installation_bin_path / "pip").exists()
- call(["which", "pip"], env=env)
- call(["pip", "--version"], env=env)
- which_pip = subprocess.run(
- ["which", "pip"], env=env, universal_newlines=True, check=True, stdout=subprocess.PIPE
- ).stdout.strip()
+ call("which", "pip", env=env)
+ call("pip", "--version", env=env)
+ which_pip = call("which", "pip", env=env, capture_stdout=True).strip()
if which_pip != "/tmp/cibw_bin/pip":
print(
"cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.",
@@ -239,11 +213,9 @@ def setup_python(
sys.exit(1)
# check what Python version we're on
- call(["which", "python"], env=env)
- call(["python", "--version"], env=env)
- which_python = subprocess.run(
- ["which", "python"], env=env, universal_newlines=True, check=True, stdout=subprocess.PIPE
- ).stdout.strip()
+ call("which", "python", env=env)
+ call("python", "--version", env=env)
+ which_python = call("which", "python", env=env, capture_stdout=True).strip()
if which_python != "/tmp/cibw_bin/python":
print(
"cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
@@ -298,27 +270,23 @@ def setup_python(
log.step("Installing build tools...")
if build_frontend == "pip":
call(
- [
- "pip",
- "install",
- "--upgrade",
- "setuptools",
- "wheel",
- "delocate",
- *dependency_constraint_flags,
- ],
+ "pip",
+ "install",
+ "--upgrade",
+ "setuptools",
+ "wheel",
+ "delocate",
+ *dependency_constraint_flags,
env=env,
)
elif build_frontend == "build":
call(
- [
- "pip",
- "install",
- "--upgrade",
- "delocate",
- "build[virtualenv]",
- *dependency_constraint_flags,
- ],
+ "pip",
+ "install",
+ "--upgrade",
+ "delocate",
+ "build[virtualenv]",
+ *dependency_constraint_flags,
env=env,
)
else:
@@ -347,7 +315,7 @@ def build(options: Options) -> None:
before_all_prepared = prepare_command(
before_all_options.before_all, project=".", package=before_all_options.package_dir
)
- call([before_all_prepared], shell=True, env=env)
+ shell(before_all_prepared, env=env)
for config in python_configurations:
build_options = options.build_options(config.identifier)
@@ -375,7 +343,7 @@ def build(options: Options) -> None:
before_build_prepared = prepare_command(
build_options.before_build, project=".", package=build_options.package_dir
)
- call(before_build_prepared, env=env, shell=True)
+ shell(before_build_prepared, env=env)
log.step("Building wheel...")
if built_wheel_dir.exists():
@@ -388,16 +356,14 @@ def build(options: Options) -> None:
# Path.resolve() is needed. Without it pip wheel may try to fetch package from pypi.org
# see https://github.com/pypa/cibuildwheel/pull/369
call(
- [
- "python",
- "-m",
- "pip",
- "wheel",
- build_options.package_dir.resolve(),
- f"--wheel-dir={built_wheel_dir}",
- "--no-deps",
- *verbosity_flags,
- ],
+ "python",
+ "-m",
+ "pip",
+ "wheel",
+ build_options.package_dir.resolve(),
+ f"--wheel-dir={built_wheel_dir}",
+ "--no-deps",
+ *verbosity_flags,
env=env,
)
elif build_options.build_frontend == "build":
@@ -410,15 +376,13 @@ def build(options: Options) -> None:
build_env["PIP_CONSTRAINT"] = constraint_path.as_uri()
build_env["VIRTUALENV_PIP"] = get_pip_version(env)
call(
- [
- "python",
- "-m",
- "build",
- build_options.package_dir,
- "--wheel",
- f"--outdir={built_wheel_dir}",
- f"--config-setting={config_setting}",
- ],
+ "python",
+ "-m",
+ "build",
+ build_options.package_dir,
+ "--wheel",
+ f"--outdir={built_wheel_dir}",
+ f"--config-setting={config_setting}",
env=build_env,
)
else:
@@ -449,7 +413,7 @@ def build(options: Options) -> None:
dest_dir=repaired_wheel_dir,
delocate_archs=delocate_archs,
)
- call(repair_command_prepared, env=env, shell=True)
+ shell(repair_command_prepared, env=env)
else:
shutil.move(str(built_wheel), repaired_wheel_dir)
@@ -514,7 +478,7 @@ def build(options: Options) -> None:
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
- call(["pip", "install", "virtualenv", *dependency_constraint_flags], env=env)
+ call("pip", "install", "virtualenv", *dependency_constraint_flags, env=env)
venv_dir = Path(tempfile.mkdtemp())
arch_prefix = []
@@ -528,18 +492,16 @@ def build(options: Options) -> None:
)
# define a custom 'call' function that adds the arch prefix each time
- def call_with_arch(args: Sequence[PathOrStr], **kwargs: Any) -> None:
- if isinstance(args, str):
- args = " ".join(arch_prefix) + " " + args
- else:
- args = [*arch_prefix, *args]
- call(args, **kwargs)
+ def call_with_arch(*args: PathOrStr, **kwargs: Any) -> None:
+ call(*arch_prefix, *args, **kwargs)
+
+ def shell_with_arch(command: str, **kwargs: Any) -> None:
+ command = " ".join(arch_prefix) + " " + command
+ shell(command, **kwargs)
# Use --no-download to ensure determinism by using seed libraries
# built into virtualenv
- call_with_arch(
- ["python", "-m", "virtualenv", "--no-download", venv_dir], env=env
- )
+ call_with_arch("python", "-m", "virtualenv", "--no-download", venv_dir, env=env)
virtualenv_env = env.copy()
virtualenv_env["PATH"] = os.pathsep.join(
@@ -550,7 +512,7 @@ def call_with_arch(args: Sequence[PathOrStr], **kwargs: Any) -> None:
)
# check that we are using the Python from the virtual environment
- call_with_arch(["which", "python"], env=virtualenv_env)
+ call_with_arch("which", "python", env=virtualenv_env)
if build_options.before_test:
before_test_prepared = prepare_command(
@@ -558,18 +520,20 @@ def call_with_arch(args: Sequence[PathOrStr], **kwargs: Any) -> None:
project=".",
package=build_options.package_dir,
)
- call_with_arch(before_test_prepared, env=virtualenv_env, shell=True)
+ shell_with_arch(before_test_prepared, env=virtualenv_env)
# install the wheel
call_with_arch(
- ["pip", "install", f"{repaired_wheel}{build_options.test_extras}"],
+ "pip",
+ "install",
+ f"{repaired_wheel}{build_options.test_extras}",
env=virtualenv_env,
)
# test the wheel
if build_options.test_requires:
call_with_arch(
- ["pip", "install"] + build_options.test_requires, env=virtualenv_env
+ "pip", "install", *build_options.test_requires, env=virtualenv_env
)
# run the tests from $HOME, with an absolute path in the command
@@ -580,11 +544,8 @@ def call_with_arch(args: Sequence[PathOrStr], **kwargs: Any) -> None:
project=Path(".").resolve(),
package=build_options.package_dir.resolve(),
)
- call_with_arch(
- test_command_prepared,
- cwd=os.environ["HOME"],
- env=virtualenv_env,
- shell=True,
+ shell_with_arch(
+ test_command_prepared, cwd=os.environ["HOME"], env=virtualenv_env
)
# clean up
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -3,6 +3,7 @@
import itertools
import os
import re
+import shlex
import ssl
import subprocess
import sys
@@ -12,7 +13,18 @@
from enum import Enum
from pathlib import Path
from time import sleep
-from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Optional, TextIO
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ NamedTuple,
+ Optional,
+ TextIO,
+ cast,
+ overload,
+)
import bracex
import certifi
@@ -47,6 +59,60 @@
"s390x",
)
+IS_WIN = sys.platform.startswith("win")
+
+
+@overload
+def call(
+ *args: PathOrStr,
+ env: Optional[Dict[str, str]] = None,
+ cwd: Optional[PathOrStr] = None,
+ capture_stdout: Literal[False] = ...,
+) -> None:
+ ...
+
+
+@overload
+def call(
+ *args: PathOrStr,
+ env: Optional[Dict[str, str]] = None,
+ cwd: Optional[PathOrStr] = None,
+ capture_stdout: Literal[True],
+) -> str:
+ ...
+
+
+def call(
+ *args: PathOrStr,
+ env: Optional[Dict[str, str]] = None,
+ cwd: Optional[PathOrStr] = None,
+ capture_stdout: bool = False,
+) -> Optional[str]:
+ """
+ Run subprocess.run, but print the commands first. Takes the commands as
+ *args. Uses shell=True on Windows due to a bug. Also converts to
+ Paths to strings, due to Windows behavior at least on older Pythons.
+ https://bugs.python.org/issue8557
+ """
+ args_ = [str(arg) for arg in args]
+ # print the command executing for the logs
+ print("+ " + " ".join(shlex.quote(a) for a in args_))
+ kwargs: Dict[str, Any] = {}
+ if capture_stdout:
+ kwargs["universal_newlines"] = True
+ kwargs["stdout"] = subprocess.PIPE
+ result = subprocess.run(args_, check=True, shell=IS_WIN, env=env, cwd=cwd, **kwargs)
+ if not capture_stdout:
+ return None
+ return cast(str, result.stdout)
+
+
+def shell(
+ command: str, env: Optional[Dict[str, str]] = None, cwd: Optional[PathOrStr] = None
+) -> None:
+ print(f"+ {command}")
+ subprocess.run(command, env=env, cwd=cwd, shell=True, check=True)
+
def format_safe(template: str, **kwargs: Any) -> str:
"""
@@ -376,11 +442,8 @@ class FileReport(NamedTuple):
def get_pip_version(env: Dict[str, str]) -> str:
- # we use shell=True here for windows, even though we don't need a shell due to a bug
- # https://bugs.python.org/issue8557
- shell = sys.platform.startswith("win")
- versions_output_text = subprocess.check_output(
- ["python", "-m", "pip", "freeze", "--all"], universal_newlines=True, shell=shell, env=env
+ versions_output_text = call(
+ "python", "-m", "pip", "freeze", "--all", capture_stdout=True, env=env
)
(pip_version,) = (
version[5:]
diff --git a/cibuildwheel/windows.py b/cibuildwheel/windows.py
--- a/cibuildwheel/windows.py
+++ b/cibuildwheel/windows.py
@@ -18,32 +18,18 @@
BuildFrontend,
BuildSelector,
NonPlatformWheelError,
+ call,
download,
get_build_verbosity_extra_flags,
get_pip_version,
prepare_command,
read_python_configs,
+ shell,
)
CIBW_INSTALL_PATH = Path("C:\\cibw")
-def call(
- args: Sequence[PathOrStr], env: Optional[Dict[str, str]] = None, cwd: Optional[PathOrStr] = None
-) -> None:
- print("+ " + " ".join(str(a) for a in args))
- # we use shell=True here, even though we don't need a shell due to a bug
- # https://bugs.python.org/issue8557
- subprocess.run([str(a) for a in args], env=env, cwd=cwd, shell=True, check=True)
-
-
-def shell(
- command: str, env: Optional[Dict[str, str]] = None, cwd: Optional[PathOrStr] = None
-) -> None:
- print(f"+ {command}")
- subprocess.run(command, env=env, cwd=cwd, shell=True, check=True)
-
-
def get_nuget_args(version: str, arch: str) -> List[str]:
platform_suffix = {"32": "x86", "64": "", "ARM64": "arm64"}
python_name = "python" + platform_suffix[arch]
@@ -94,7 +80,7 @@ def extract_zip(zip_src: Path, dest: Path) -> None:
def install_cpython(version: str, arch: str, nuget: Path) -> Path:
nuget_args = get_nuget_args(version, arch)
installation_path = Path(nuget_args[-1]) / (nuget_args[0] + "." + version) / "tools"
- call([nuget, "install", *nuget_args])
+ call(nuget, "install", *nuget_args)
# "python3" is not included in the vanilla nuget package,
# though it can be present if modified (like on Azure).
if not (installation_path / "python3.exe").exists():
@@ -165,7 +151,7 @@ def setup_python(
if requires_reinstall:
# maybe pip isn't installed at all. ensurepip resolves that.
- call(["python", "-m", "ensurepip"], env=env, cwd=CIBW_INSTALL_PATH)
+ call("python", "-m", "ensurepip", env=env, cwd=CIBW_INSTALL_PATH)
# pip older than 21.3 builds executables such as pip.exe for x64 platform.
# The first re-install of pip updates pip module but builds pip.exe using
@@ -175,16 +161,14 @@ def setup_python(
# pip versions newer than 21.3.
if python_configuration.arch == "ARM64" and Version(get_pip_version(env)) < Version("21.3"):
call(
- [
- "python",
- "-m",
- "pip",
- "install",
- "--force-reinstall",
- "--upgrade",
- "pip",
- *dependency_constraint_flags,
- ],
+ "python",
+ "-m",
+ "pip",
+ "install",
+ "--force-reinstall",
+ "--upgrade",
+ "pip",
+ *dependency_constraint_flags,
env=env,
cwd=CIBW_INSTALL_PATH,
)
@@ -192,15 +176,13 @@ def setup_python(
# upgrade pip to the version matching our constraints
# if necessary, reinstall it to ensure that it's available on PATH as 'pip.exe'
call(
- [
- "python",
- "-m",
- "pip",
- "install",
- "--force-reinstall" if requires_reinstall else "--upgrade",
- "pip",
- *dependency_constraint_flags,
- ],
+ "python",
+ "-m",
+ "pip",
+ "install",
+ "--force-reinstall" if requires_reinstall else "--upgrade",
+ "pip",
+ *dependency_constraint_flags,
env=env,
cwd=CIBW_INSTALL_PATH,
)
@@ -209,20 +191,10 @@ def setup_python(
env = environment.as_dictionary(prev_environment=env)
# check what Python version we're on
- call(["where", "python"], env=env)
- call(["python", "--version"], env=env)
- call(["python", "-c", "\"import struct; print(struct.calcsize('P') * 8)\""], env=env)
- where_python = (
- subprocess.run(
- ["where", "python"],
- env=env,
- universal_newlines=True,
- check=True,
- stdout=subprocess.PIPE,
- )
- .stdout.splitlines()[0]
- .strip()
- )
+ call("where", "python", env=env)
+ call("python", "--version", env=env)
+ call("python", "-c", "\"import struct; print(struct.calcsize('P') * 8)\"", env=env)
+ where_python = call("where", "python", env=env, capture_stdout=True).splitlines()[0].strip()
if where_python != str(installation_path / "python.exe"):
print(
"cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.",
@@ -232,13 +204,7 @@ def setup_python(
# check what pip version we're on
assert (installation_path / "Scripts" / "pip.exe").exists()
- where_pip = (
- subprocess.run(
- ["where", "pip"], env=env, universal_newlines=True, check=True, stdout=subprocess.PIPE
- )
- .stdout.splitlines()[0]
- .strip()
- )
+ where_pip = call("where", "pip", env=env, capture_stdout=True).splitlines()[0].strip()
if where_pip.strip() != str(installation_path / "Scripts" / "pip.exe"):
print(
"cibuildwheel: pip available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert pip above it.",
@@ -246,23 +212,25 @@ def setup_python(
)
sys.exit(1)
- call(["pip", "--version"], env=env)
+ call("pip", "--version", env=env)
if build_frontend == "pip":
call(
- [
- "pip",
- "install",
- "--upgrade",
- "setuptools",
- "wheel",
- *dependency_constraint_flags,
- ],
+ "pip",
+ "install",
+ "--upgrade",
+ "setuptools",
+ "wheel",
+ *dependency_constraint_flags,
env=env,
)
elif build_frontend == "build":
call(
- ["pip", "install", "--upgrade", "build[virtualenv]", *dependency_constraint_flags],
+ "pip",
+ "install",
+ "--upgrade",
+ "build[virtualenv]",
+ *dependency_constraint_flags,
env=env,
)
else:
@@ -330,16 +298,14 @@ def build(options: Options) -> None:
# Path.resolve() is needed. Without it pip wheel may try to fetch package from pypi.org
# see https://github.com/pypa/cibuildwheel/pull/369
call(
- [
- "python",
- "-m",
- "pip",
- "wheel",
- options.globals.package_dir.resolve(),
- f"--wheel-dir={built_wheel_dir}",
- "--no-deps",
- *get_build_verbosity_extra_flags(build_options.build_verbosity),
- ],
+ "python",
+ "-m",
+ "pip",
+ "wheel",
+ options.globals.package_dir.resolve(),
+ f"--wheel-dir={built_wheel_dir}",
+ "--no-deps",
+ *get_build_verbosity_extra_flags(build_options.build_verbosity),
env=env,
)
elif build_options.build_frontend == "build":
@@ -364,15 +330,13 @@ def build(options: Options) -> None:
build_env["PIP_CONSTRAINT"] = str(constraints_path)
build_env["VIRTUALENV_PIP"] = get_pip_version(env)
call(
- [
- "python",
- "-m",
- "build",
- build_options.package_dir,
- "--wheel",
- f"--outdir={built_wheel_dir}",
- f"--config-setting={config_setting}",
- ],
+ "python",
+ "-m",
+ "build",
+ build_options.package_dir,
+ "--wheel",
+ f"--outdir={built_wheel_dir}",
+ f"--config-setting={config_setting}",
env=build_env,
)
else:
@@ -403,12 +367,12 @@ def build(options: Options) -> None:
log.step("Testing wheel...")
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
- call(["pip", "install", "virtualenv", *dependency_constraint_flags], env=env)
+ call("pip", "install", "virtualenv", *dependency_constraint_flags, env=env)
venv_dir = Path(tempfile.mkdtemp())
# Use --no-download to ensure determinism by using seed libraries
# built into virtualenv
- call(["python", "-m", "virtualenv", "--no-download", venv_dir], env=env)
+ call("python", "-m", "virtualenv", "--no-download", venv_dir, env=env)
virtualenv_env = env.copy()
virtualenv_env["PATH"] = os.pathsep.join(
@@ -419,7 +383,7 @@ def build(options: Options) -> None:
)
# check that we are using the Python from the virtual environment
- call(["where", "python"], env=virtualenv_env)
+ call("where", "python", env=virtualenv_env)
if build_options.before_test:
before_test_prepared = prepare_command(
@@ -431,13 +395,15 @@ def build(options: Options) -> None:
# install the wheel
call(
- ["pip", "install", str(repaired_wheel) + build_options.test_extras],
+ "pip",
+ "install",
+ str(repaired_wheel) + build_options.test_extras,
env=virtualenv_env,
)
# test the wheel
if build_options.test_requires:
- call(["pip", "install"] + build_options.test_requires, env=virtualenv_env)
+ call("pip", "install", *build_options.test_requires, env=virtualenv_env)
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
| refactor: use run-like syntax
This standardizes the "call" and "shell" commands into a single "run" utility. This utility is just a wrapper around `subprocess.run` that prints the command before running it, and has a better default on Windows due to a Python bug. The goal here is really not to simplify, or to even help out #665 anymore, but to better match the stdlib run syntax to be more recognizable to readers. This is a natural followup to #592 which changed `subprocess.check_call(...)` to `subprocess.run(..., check=True)`. I think if this was developed from scratch today, it would look more like this syntax vs. the old `call`.
Mapping:
* `call(...)` -> `run(..., check=True)`
* `shell(...)` -> `run(..., check=True, shell=True)` (on Windows, for now, this is the same as the above line, but highlights that it is intentionally needing/using the shell, while the one above does not except for the mentioned bug)
* `call_with_arch(...)` -> `run_with_arch(..., check=True)`
I did not change the docker `self.call`, as that's already pretty far from `subprocess.run`, AFAICT, and therefore benefits less from mimicking it. Though it could be changed.
Not absolutely glued to the idea if others have objections. The other direction would be to try to simplify the call command to diverge farther from `subprocess.run`, say by accepting `*args`, smarter cwd default, etc. This could then allow `pip = partial(call, "python", "-m", "pip")`, for example.
| I'm not sure I agree with the motivation behind this change. I think a key motivator for the existing `call` wrapper is to ensure that we don't accidentally miss `check=True` on a function call. I understand that matching the stdlib might be a bit more familiar, but `check=False` is the wrong default for our use-case, IMO.
The original call wrapper was a wrapper for `check_call` with an extra printout. Now `run(check=True)` is the recommended method for this instead of `check_call`. What about making `check=` a required keyword argument for the wrapper? That would force it to be clearly included in every usage.
That would help, yeah. But I still don't understand the need to unify the two platforms' `run` functions - they're different. The Windows one is quite weird, actually - it calls `run()` with an _array_ of args and `shell=True`. This works because subprocess knows how to escape sequences of params when passing them to a shell on Windows. That isn't the case on Mac, nor does it need to be - the argument list remains an argument list, but just prefixed with `['/bin/sh', '-c']`. So on POSIX, `shell=True` is different - the first argument of `args` becomes special - it's a shell script, not a command. But all other args are passed to it. So on POSIX, `shell=True` completely changes how an argument list is interpreted:
```python
>>> subprocess.run(['/bin/echo', 'hello!'])
hello!
CompletedProcess(args=['/bin/echo', 'hello!'], returncode=0)
>>> subprocess.run(['/bin/echo', 'hello!'], shell=True)
CompletedProcess(args=['/bin/echo', 'hello!'], returncode=0)
```
Note the missing output on the second example. It's missing because the 'hello!' argument went to `/bin/sh` not `/bin/echo`.
(sorry for going deep, but I had to make sure I wasn't making a mountain out of a molehill!)
On Windows, this is simulating what _would_ be done if Windows didn't have the (linked) bug. If that bug were fixed, you could remove the custom handling and it would look like the others. But currently, we have to pretend that `shell=False` is in effect even though we have to add `shell=True`. When you pass `shell=True`, you need to pass a string, not a list, and that's what's being done here.
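To make the points above concrete, here is a minimal sketch of the two cases (an illustration of the `subprocess` semantics being discussed, not cibuildwheel's code):
```python
import subprocess
import sys

if sys.platform.startswith("win"):
    # On Windows we want the shell, to work around the PATH resolution bug
    # (bpo-8557); with shell=True the command is best given as one string.
    subprocess.run("python --version", shell=True, check=True)
else:
    # On POSIX, shell=True would change the meaning of a list (the first
    # element becomes a shell script and the rest become its parameters),
    # so an ordinary command list keeps the default shell=False.
    subprocess.run(["python", "--version"], check=True)
```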
Apologies for the delay in getting back to this @henryiii ! Probably I've been dragging my feet because I'm conflict-averse and I didn't see it as a high priority, but I do see the value in unifying some kind of wrapper, as we saw in #521. So I return to this with slightly more understanding of the why.
That said,
1. I don't think `check=True` needs to be placed at every call site, when 99% of usages want that. A default of `check=True` makes sense to me (it's kind of crazy that this isn't the default in subprocess... but I digress)
2. I think that executing commands (lists) are different from running shell scripts (strings). This is true in types, but also in [behaviour](https://github.com/pypa/cibuildwheel/pull/672#issuecomment-846159871). So we should have an API that reflects that. In particular, the `shell=True` flag that allows a list to be passed is problematic, because this has a different behaviour on POSIX versus windows. (this is why the original `call` and `shell` functions are separate)
It would be lovely to match the subprocess.run API for familiarity, but I think that the above concerns are more important, and we wouldn't have a wrapper if we weren't changing behaviour (logging plus the windows workaround), so there is some logic in changing the name.
I've gone around the houses on this, but settled on something quite neat I think. So my counter proposal might look something like:
```python
def call(
args: Sequence[PathOrStr],
env: Optional[Dict[str, str]] = None,
cwd: Optional[PathOrStr] = None,
) -> None:
"""
Executes a command, like subprocess.run(..., check=True), but:
- Prints the command before executing it
- Works around a couple of bugs on Windows
"""
print("+ " + " ".join(shlex.quote(str(a)) for a in args))
# execute in a shell on Windows to workaround a PATH resolution bug
# https://bugs.python.org/issue8557
shell = sys.platform.startswith("win")
# converts Paths to strings, due to Windows behavior at least on older Pythons.
subprocess.run([str(a) for a in args], shell=shell, env=env, cwd=cwd, check=True)
```
- I've used a different name, to deliberately differ from subprocess.run. Not wedded to `call` necessarily, but it makes sense to me and is consistent with existing code and `docker.call`
- This is not a shell interface - it's just for running commands. I realised that we don't use `shell=True` at all on the linux build; we instead do `call(['sh', '-c', script])`, which is actually better - it's completely explicit, and familiar, and the logs show how the script is invoked (see the sketch below). So I'd propose that we do the same on macOS too, replacing `shell()` with `call(['sh', '-c', script])`. I'm not sure if this would be required on windows too, but if needed, there we could also do `call(['cmd', '/c', script])`. This gets us around all the nasty ambiguity of the shell argument having different behaviours and default values on different platforms.
Curious what you think @henryiii. I realise that this doesn't match your original idea, but I think maybe this would unify the API in a way that we can both agree on?
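As a self-contained sketch of that `call(['sh', '-c', script])` pattern (using `subprocess` directly; the script content is made up for illustration):
```python
import subprocess

# The whole script is a single argument to sh, so it is completely explicit
# which part is shell syntax and how the script is invoked.
script = 'echo "before-all step" && uname -m'
subprocess.run(["sh", "-c", script], check=True)
```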
I was always fine going the other way too; I have [not been a fan](https://plumbum.readthedocs.io/en/latest/) of the subprocess syntax since 2015. :)
> The other direction would be to try to simplify the call command to diverge farther from subprocess.run, say by accepting *args, smarter cwd default, etc
Since we are coming up with a new syntax, what about:
```python
def call(
*args: PathOrStr,
env: Optional[Dict[str, str]] = None,
cwd: Optional[PathOrStr] = None,
) -> None:
```
Adding the brackets each time isn't really necessary, and it allows this misuse of a plus sign:
```python
call(my_args + ["--other"])
```
to be written like this:
```python
call(*my_args, "--other")
``` | 2021-12-30T16:05:20 |
|
pypa/cibuildwheel | 1,017 | pypa__cibuildwheel-1017 | [
"1015"
] | b52698dad6af15f292cef3e9d2f148969ac730fa | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -1,4 +1,5 @@
import contextlib
+import dataclasses
import fnmatch
import itertools
import os
@@ -16,6 +17,7 @@
from time import sleep
from typing import (
Any,
+ ClassVar,
Dict,
Iterable,
Iterator,
@@ -205,6 +207,8 @@ def selector_matches(patterns: str, string: str) -> bool:
return any(fnmatch.fnmatch(string, pat) for pat in expanded_patterns)
+# Once we require Python 3.10+, we can add kw_only=True
[email protected]
class IdentifierSelector:
"""
This class holds a set of build/skip patterns. You call an instance with a
@@ -215,20 +219,12 @@ class IdentifierSelector:
"""
# a pattern that skips prerelease versions, when include_prereleases is False.
- PRERELEASE_SKIP = ""
-
- def __init__(
- self,
- *,
- build_config: str,
- skip_config: str,
- requires_python: Optional[SpecifierSet] = None,
- prerelease_pythons: bool = False,
- ):
- self.build_config = build_config
- self.skip_config = skip_config
- self.requires_python = requires_python
- self.prerelease_pythons = prerelease_pythons
+ PRERELEASE_SKIP: ClassVar[str] = ""
+
+ skip_config: str
+ build_config: str
+ requires_python: Optional[SpecifierSet] = None
+ prerelease_pythons: bool = False
def __call__(self, build_id: str) -> bool:
# Filter build selectors by python_requires if set
@@ -241,9 +237,7 @@ def __call__(self, build_id: str) -> bool:
return False
# filter out the prerelease pythons if self.prerelease_pythons is False
- if not self.prerelease_pythons and selector_matches(
- BuildSelector.PRERELEASE_SKIP, build_id
- ):
+ if not self.prerelease_pythons and selector_matches(self.PRERELEASE_SKIP, build_id):
return False
should_build = selector_matches(self.build_config, build_id)
@@ -251,28 +245,17 @@ def __call__(self, build_id: str) -> bool:
return should_build and not should_skip
- def __repr__(self) -> str:
- result = f"{self.__class__.__name__}(build_config={self.build_config!r}"
-
- if self.skip_config:
- result += f", skip_config={self.skip_config!r}"
- if self.prerelease_pythons:
- result += ", prerelease_pythons=True"
-
- result += ")"
-
- return result
-
[email protected]
class BuildSelector(IdentifierSelector):
pass
# Note that requires-python is not needed for TestSelector, as you can't test
# what you can't build.
[email protected]
class TestSelector(IdentifierSelector):
- def __init__(self, *, skip_config: str):
- super().__init__(build_config="*", skip_config=skip_config)
+ build_config: str = "*"
# Taken from https://stackoverflow.com/a/107717
| No build identifiers selected, even though build_config is `cp37-*`
### Description
I'm having an issue building Python 3.7 wheels on Linux, OSX and Windows. This issue seems to have appeared very recently and did not occur a week ago in a release test run.
```
cibuildwheel version 2.3.1
Build options:
platform: 'macos'
architectures: {<Architecture.x86_64: 'x86_64'>, <Architecture.arm64: 'arm64'>}
build_selector: BuildSelector(build_config='cp37-*', skip_config='pp* *-musllinux_* cp310-win32')
output_dir: PosixPath('wheelhouse')
package_dir: PosixPath('statsmodels')
test_selector: TestSelector(build_config='*', skip_config='*-*linux_{aarch64,ppc64le,s390x}')
before_all: ''
before_build: 'git submodule foreach git checkout v0.13.2'
before_test: ''
build_frontend: 'pip'
build_verbosity: 2
dependency_constraints: DependencyConstraints(PosixPath('/Users/runner/hostedtoolcache/Python/3.10.2/x64/lib/python3.10/site-packages/cibuildwheel/resources/constraints.txt'))
environment: ParsedEnvironment([])
manylinux_images: None
musllinux_images: None
repair_command: 'delocate-listdeps {wheel} && delocate-wheel --require-archs {delocate_archs} -w {dest_dir} {wheel}'
test_command: 'python -c "import statsmodels; statsmodels.test([\'--skip-examples\',\'--skip-slow\',\'-n\',\'2\'])"'
test_extras: ''
test_requires: ['pytest', 'pytest-xdist']
Here we go!
cibuildwheel: No build identifiers selected: BuildSelector(build_config='cp37-*', skip_config='pp* *-musllinux_* cp310-win32')
Error: Process completed with exit code 3.
```
### Build log
https://github.com/MacPython/statsmodels-wheels/runs/5114202151?check_suite_focus=true
### CI config
https://github.com/MacPython/statsmodels-wheels/blob/rel-0.13.0/.github/workflows/build-wheels.yml
| This one has me stumped, it's fine locally:
```console
$ CIBW_BUILD='cp37-*' CIBW_SKIP='pp* *-musllinux_* cp310-win32' CIBW_ARCHS_LINUX='x86_64 aarch64' pipx run cibuildwheel --platform linux --print-build-identifiers
cp37-manylinux_x86_64
cp37-manylinux_aarch64
```
I'd recommend statsmodels include `build-backend` (they are using `__legacy__` by default), and `oldest-supported-numpy` instead of listing the NumPy versions manually (they are wrong for unusual platforms, and not forward-compatible when a new platform or Python version comes out), but there's not even anything in there that would affect cibuildwheel.
Ahh, no, I understand it. It's Python-requires, that's killing the build. Upstream has dropped Python 3.7 and cibuildwheel knows that, so it's not able to build what you are asking for.
Though to me it looks like this is the old branch where that hasn't happened...
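For illustration, a declared `python_requires=">=3.8"` removes every cp37 identifier from the selection. A hedged sketch of the check, using `packaging` directly rather than cibuildwheel's code:
```python
from packaging.specifiers import SpecifierSet

requires_python = SpecifierSet(">=3.8")
print("3.7" in requires_python)  # False -> cp37-* identifiers get filtered out
print("3.8" in requires_python)  # True  -> cp38-* identifiers are kept
```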
@henryiii We are building an old branch: https://github.com/statsmodels/statsmodels/tree/maintenance/0.13.x
The relevant part of the setup is
```python
extras_require=EXTRAS_REQUIRE,
zip_safe=False,
python_requires=">=3.7",
)
```
So no issue with python requires.
> I'd recommend statsmodels include build-backend (they are using __legacy__ by default), and oldest-supported-numpy instead of listing the NumPy versions manually (they are wrong for unusual platforms, and not forward-compatible when a new platform or Python version comes out), but there's not even anything in there that would affect cibuildwheel.
Does setting `build-backend` require a new release? We are already on oldest-supported-numpy
https://github.com/statsmodels/statsmodels/blob/maintenance/0.13.x/pyproject.toml
No, it’s just nice.
Don’t know why this is doing this, will have to investigate further.
(Also posted on the debugging PR in statsmodels-wheels)
Yep, running this:
```console
$ git checkout rel-0.13.0
$ git submodule update --init --recursive
$ pipx run cibuildwheel --platform linux --print-build-identifiers statsmodels/
cp38-manylinux_x86_64
cp39-manylinux_x86_64
cp310-manylinux_x86_64
cp38-manylinux_i686
cp39-manylinux_i686
cp310-manylinux_i686
pp38-manylinux_x86_64
pp38-manylinux_i686
cp38-musllinux_x86_64
cp39-musllinux_x86_64
cp310-musllinux_x86_64
cp38-musllinux_i686
cp39-musllinux_i686
cp310-musllinux_i686
```
And looking in setup.py, I see this:
```python
python_requires=">=3.8",
```
I think you have the wrong commit in the submodule.
I wonder if we can make the Python requires setting a bit more obvious.
Also, I'd personally make a cibuildwheel.toml file, put all configuration in that, then use `--config-file=cibuildwheel.toml`, would make local runs easier. | 2022-02-09T20:04:58 |
|
pypa/cibuildwheel | 1,031 | pypa__cibuildwheel-1031 | [
"1032"
] | 4465ec193f1508ee4b9b2866eae68f3a2a198d88 | diff --git a/bin/update_pythons.py b/bin/update_pythons.py
--- a/bin/update_pythons.py
+++ b/bin/update_pythons.py
@@ -109,7 +109,12 @@ def __init__(self, arch_str: ArchStr):
response = requests.get("https://downloads.python.org/pypy/versions.json")
response.raise_for_status()
- releases = [r for r in response.json() if r["pypy_version"] != "nightly"]
+ releases = [
+ r
+ for r in response.json()
+ if r["pypy_version"] != "nightly"
+ and f'{r["python_version"]}-{r["pypy_version"]}' != "3.7.12-7.3.8"
+ ]
for release in releases:
release["pypy_version"] = Version(release["pypy_version"])
release["python_version"] = Version(release["python_version"])
| diff --git a/test/test_manylinuxXXXX_only.py b/test/test_manylinuxXXXX_only.py
--- a/test/test_manylinuxXXXX_only.py
+++ b/test/test_manylinuxXXXX_only.py
@@ -71,6 +71,9 @@ def test(manylinux_image, tmp_path):
if manylinux_image in {"manylinux1"}:
# We don't have a manylinux1 image for PyPy & CPython 3.10 and above
add_env["CIBW_SKIP"] = "pp* cp31*"
+ if manylinux_image in {"manylinux2010"}:
+ # We don't have a manylinux2010 image for PyPy 3.9
+ add_env["CIBW_SKIP"] = "pp39*"
actual_wheels = utils.cibuildwheel_run(project_dir, add_env=add_env)
@@ -88,4 +91,7 @@ def test(manylinux_image, tmp_path):
if manylinux_image in {"manylinux1"}:
# remove PyPy & CPython 3.10 and above
expected_wheels = [w for w in expected_wheels if "-pp" not in w and "-cp31" not in w]
+ if manylinux_image in {"manylinux2010"}:
+ # remove PyPy 3.9
+ expected_wheels = [w for w in expected_wheels if "-pp39" not in w]
assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -135,7 +135,7 @@ def expected_wheels(
python_abi_tags = ["cp36-cp36m", "cp37-cp37m", "cp38-cp38", "cp39-cp39", "cp310-cp310"]
if machine_arch in ["x86_64", "AMD64", "x86", "aarch64"]:
- python_abi_tags += ["pp37-pypy37_pp73", "pp38-pypy38_pp73"]
+ python_abi_tags += ["pp37-pypy37_pp73", "pp38-pypy38_pp73", "pp39-pypy39_pp73"]
if platform == "macos" and machine_arch == "arm64":
# currently, arm64 macs are only supported by cp39 & cp310
diff --git a/unit_test/option_prepare_test.py b/unit_test/option_prepare_test.py
--- a/unit_test/option_prepare_test.py
+++ b/unit_test/option_prepare_test.py
@@ -11,7 +11,7 @@
from cibuildwheel import linux, util
from cibuildwheel.__main__ import main
-ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "pp37", "pp38"}
+ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "pp37", "pp38", "pp39"}
@pytest.fixture
@@ -133,7 +133,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
identifiers = {x.identifier for x in kwargs["platform_configs"]}
assert identifiers == {
- f"{x}-manylinux_x86_64" for x in ALL_IDS - {"cp36", "cp310", "pp37", "pp38"}
+ f"{x}-manylinux_x86_64" for x in ALL_IDS - {"cp36", "cp310", "pp37", "pp38", "pp39"}
}
assert kwargs["options"].build_options("cp37-manylinux_x86_64").before_all == ""
@@ -146,6 +146,7 @@ def test_build_with_override_launches(mock_build_docker, monkeypatch, tmp_path):
"cp310-manylinux_x86_64",
"pp37-manylinux_x86_64",
"pp38-manylinux_x86_64",
+ "pp39-manylinux_x86_64",
}
kwargs = build_on_docker.call_args_list[3][1]
| PyPy has released bug fixes and a python3.9
### Description
PyPy released v7.3.8. It would be nice to be able to use it in cibuildwheel, including the newly released Python 3.9.
### Build log
_No response_
### CI config
_No response_
| Bug fixes are included in #1027 (for macOS/Windows).
PyPy 3.9 is addressed in #1031 (for macOS/Windows).
~~manylinux is on hold for both c.f. https://github.com/pypa/manylinux/pull/1288#issuecomment-1046322172~~ | 2022-02-20T21:43:26 |
pypa/cibuildwheel | 1,065 | pypa__cibuildwheel-1065 | [
"1064"
] | 358362d08c02f663d7abbff0e4f0900086e94986 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,6 +4,7 @@
"docs": [
"mkdocs-include-markdown-plugin==2.8.0",
"mkdocs==1.0.4",
+ "jinja2==3.0.3",
"pymdown-extensions",
"mkdocs-macros-plugin",
],
| [Docs] Example may accidentally be encouraging users to write invalid `pyproject.toml` files
### Description
Hi guys, first of all thank you for the amazing project (always good to remember).
While I was working on adding support for PEP 621 to setuptools, I noticed a series of users having problems with invalid `pyproject.toml` files. The common pattern seem to be a almost empty `[project]` table with only a `requires-python` field set, which is invalid according to PEP 621.
It took me some time to find the reason for this behaviour but I think it comes from:
https://cibuildwheel.readthedocs.io/en/stable/options/#requires-python
I suspect that the example and notes about the preferred way of setting the config has been encouraging users that are unaware of PEP 621 to write technically invalid `pyproject.toml` files.
Please note that this issue is not necessarily related to setuptools itself.
The existence of the `[project]` table in the `pyproject.toml` allows (according to the standard) the installer/builder/consumer program to treat the package differently (specially regarding `dynamic`).
I think it would be nice to at least add a note about this so users became aware of the implications of adding a `[project]` table.
### Build log
_No response_
### CI config
_No response_
| 2022-03-26T10:32:08 |
||
pypa/cibuildwheel | 1,098 | pypa__cibuildwheel-1098 | [
"1262"
] | 3a46bde3fbcdffa4b61a4cc0b7f8a4b403408fd0 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -7,6 +7,7 @@
import tarfile
import tempfile
import textwrap
+import typing
from pathlib import Path
from tempfile import mkdtemp
@@ -41,7 +42,7 @@ def main() -> None:
parser.add_argument(
"--platform",
choices=["auto", "linux", "macos", "windows"],
- default=os.environ.get("CIBW_PLATFORM", "auto"),
+ default=None,
help="""
Platform to build for. Use this option to override the
auto-detected platform or to run cibuildwheel on your development
@@ -65,6 +66,16 @@ def main() -> None:
""",
)
+ parser.add_argument(
+ "--only",
+ default=None,
+ help="""
+ Force a single wheel build when given an identifier. Overrides
+ CIBW_BUILD/CIBW_SKIP. --platform and --arch cannot be specified
+ if this is given.
+ """,
+ )
+
parser.add_argument(
"--output-dir",
type=Path,
@@ -149,10 +160,40 @@ def main() -> None:
def build_in_directory(args: CommandLineArguments) -> None:
+ platform_option_value = args.platform or os.environ.get("CIBW_PLATFORM", "auto")
platform: PlatformName
- if args.platform != "auto":
- platform = args.platform
+ if args.only:
+ if "linux_" in args.only:
+ platform = "linux"
+ elif "macosx_" in args.only:
+ platform = "macos"
+ elif "win_" in args.only:
+ platform = "windows"
+ else:
+ print(
+ f"Invalid --only='{args.only}', must be a build selector with a known platform",
+ file=sys.stderr,
+ )
+ sys.exit(2)
+ if args.platform is not None:
+ print(
+ "--platform cannot be specified with --only, it is computed from --only",
+ file=sys.stderr,
+ )
+ sys.exit(2)
+ if args.archs is not None:
+ print(
+ "--arch cannot be specified with --only, it is computed from --only",
+ file=sys.stderr,
+ )
+ sys.exit(2)
+ elif platform_option_value != "auto":
+ if platform_option_value not in PLATFORMS:
+ print(f"cibuildwheel: Unsupported platform: {platform_option_value}", file=sys.stderr)
+ sys.exit(2)
+
+ platform = typing.cast(PlatformName, platform_option_value)
else:
ci_provider = detect_ci_provider()
if ci_provider is None:
@@ -182,10 +223,6 @@ def build_in_directory(args: CommandLineArguments) -> None:
)
sys.exit(2)
- if platform not in PLATFORMS:
- print(f"cibuildwheel: Unsupported platform: {platform}", file=sys.stderr)
- sys.exit(2)
-
options = compute_options(platform=platform, command_line_arguments=args)
package_dir = options.globals.package_dir
diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -41,9 +41,10 @@
@dataclass
class CommandLineArguments:
- platform: Literal["auto", "linux", "macos", "windows"]
+ platform: Literal["auto", "linux", "macos", "windows"] | None
archs: str | None
output_dir: Path
+ only: str | None
config_file: str
package_dir: Path
print_build_identifiers: bool
@@ -388,6 +389,15 @@ def globals(self) -> GlobalOptions:
)
requires_python = None if requires_python_str is None else SpecifierSet(requires_python_str)
+ archs_config_str = args.archs or self.reader.get("archs", sep=" ")
+ architectures = Architecture.parse_config(archs_config_str, platform=self.platform)
+
+ # Process `--only`
+ if args.only:
+ build_config = args.only
+ skip_config = ""
+ architectures = Architecture.all_archs(self.platform)
+
build_selector = BuildSelector(
build_config=build_config,
skip_config=skip_config,
@@ -396,9 +406,6 @@ def globals(self) -> GlobalOptions:
)
test_selector = TestSelector(skip_config=test_skip)
- archs_config_str = args.archs or self.reader.get("archs", sep=" ")
- architectures = Architecture.parse_config(archs_config_str, platform=self.platform)
-
container_engine_str = self.reader.get("container-engine")
if container_engine_str not in ["docker", "podman"]:
@@ -569,6 +576,9 @@ def summary(self, identifiers: list[str]) -> str:
]
build_option_defaults = self.build_options(identifier=None)
+ build_options_for_identifier = {
+ identifier: self.build_options(identifier) for identifier in identifiers
+ }
for option_name, default_value in sorted(asdict(build_option_defaults).items()):
if option_name == "globals":
@@ -578,7 +588,7 @@ def summary(self, identifiers: list[str]) -> str:
# if any identifiers have an overridden value, print that too
for identifier in identifiers:
- option_value = getattr(self.build_options(identifier=identifier), option_name)
+ option_value = getattr(build_options_for_identifier[identifier], option_name)
if option_value != default_value:
lines.append(f" {identifier}: {option_value!r}")
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -144,7 +144,7 @@ def shell(*commands: str, env: dict[str, str] | None = None, cwd: PathOrStr | No
subprocess.run(command, env=env, cwd=cwd, shell=True, check=True)
-def format_safe(template: str, **kwargs: Any) -> str:
+def format_safe(template: str, **kwargs: str | os.PathLike[str]) -> str:
"""
Works similarly to `template.format(**kwargs)`, except that unmatched
fields in `template` are passed through untouched.
@@ -172,11 +172,9 @@ def format_safe(template: str, **kwargs: Any) -> str:
re.VERBOSE,
)
- # we use a function for repl to prevent re.sub interpreting backslashes
- # in repl as escape sequences.
result = re.sub(
pattern=find_pattern,
- repl=lambda _: str(value), # pylint: disable=cell-var-from-loop
+ repl=str(value).replace("\\", r"\\"),
string=result,
)
| diff --git a/unit_test/main_tests/conftest.py b/unit_test/main_tests/conftest.py
--- a/unit_test/main_tests/conftest.py
+++ b/unit_test/main_tests/conftest.py
@@ -12,7 +12,13 @@
class ArgsInterceptor:
+ def __init__(self):
+ self.call_count = 0
+ self.args = None
+ self.kwargs = None
+
def __call__(self, *args, **kwargs):
+ self.call_count += 1
self.args = args
self.kwargs = kwargs
@@ -75,16 +81,14 @@ def platform(request, monkeypatch):
@pytest.fixture
-def intercepted_build_args(platform, monkeypatch):
+def intercepted_build_args(monkeypatch):
intercepted = ArgsInterceptor()
- if platform == "linux":
- monkeypatch.setattr(linux, "build", intercepted)
- elif platform == "macos":
- monkeypatch.setattr(macos, "build", intercepted)
- elif platform == "windows":
- monkeypatch.setattr(windows, "build", intercepted)
- else:
- raise ValueError(f"unknown platform value: {platform}")
+ monkeypatch.setattr(linux, "build", intercepted)
+ monkeypatch.setattr(macos, "build", intercepted)
+ monkeypatch.setattr(windows, "build", intercepted)
+
+ yield intercepted
- return intercepted
+ # check that intercepted_build_args only ever had one set of args
+ assert intercepted.call_count <= 1
diff --git a/unit_test/main_tests/main_platform_test.py b/unit_test/main_tests/main_platform_test.py
--- a/unit_test/main_tests/main_platform_test.py
+++ b/unit_test/main_tests/main_platform_test.py
@@ -192,3 +192,73 @@ def test_archs_platform_all(platform, intercepted_build_args, monkeypatch):
Architecture.arm64,
Architecture.universal2,
}
+
+
[email protected](
+ "only,plat",
+ (
+ ("cp311-manylinux_x86_64", "linux"),
+ ("cp310-win_amd64", "windows"),
+ ("cp311-macosx_x86_64", "macos"),
+ ),
+)
+def test_only_argument(intercepted_build_args, monkeypatch, only, plat):
+ monkeypatch.setenv("CIBW_BUILD", "unused")
+ monkeypatch.setenv("CIBW_SKIP", "unused")
+ monkeypatch.setattr(sys, "argv", sys.argv + ["--only", only])
+
+ main()
+
+ options = intercepted_build_args.args[0]
+ assert options.globals.build_selector.build_config == only
+ assert options.globals.build_selector.skip_config == ""
+ assert options.platform == plat
+ assert options.globals.architectures == Architecture.all_archs(plat)
+
+
[email protected]("only", ("cp311-manylxinux_x86_64", "some_linux_thing"))
+def test_only_failed(monkeypatch, only):
+ monkeypatch.setattr(sys, "argv", sys.argv + ["--only", only])
+
+ with pytest.raises(SystemExit):
+ main()
+
+
+def test_only_no_platform(monkeypatch):
+ monkeypatch.setattr(
+ sys, "argv", sys.argv + ["--only", "cp311-manylinux_x86_64", "--platform", "macos"]
+ )
+
+ with pytest.raises(SystemExit):
+ main()
+
+
+def test_only_no_archs(monkeypatch):
+ monkeypatch.setattr(
+ sys, "argv", sys.argv + ["--only", "cp311-manylinux_x86_64", "--archs", "x86_64"]
+ )
+
+ with pytest.raises(SystemExit):
+ main()
+
+
[email protected](
+ "envvar_name,envvar_value",
+ (
+ ("CIBW_BUILD", "cp310-*"),
+ ("CIBW_SKIP", "cp311-*"),
+ ("CIBW_ARCHS", "auto32"),
+ ("CIBW_PLATFORM", "macos"),
+ ),
+)
+def test_only_overrides_env_vars(monkeypatch, intercepted_build_args, envvar_name, envvar_value):
+ monkeypatch.setattr(sys, "argv", sys.argv + ["--only", "cp311-manylinux_x86_64"])
+ monkeypatch.setenv(envvar_name, envvar_value)
+
+ main()
+
+ options = intercepted_build_args.args[0]
+ assert options.globals.build_selector.build_config == "cp311-manylinux_x86_64"
+ assert options.globals.build_selector.skip_config == ""
+ assert options.platform == "linux"
+ assert options.globals.architectures == Architecture.all_archs("linux")
diff --git a/unit_test/utils.py b/unit_test/utils.py
--- a/unit_test/utils.py
+++ b/unit_test/utils.py
@@ -10,6 +10,7 @@ def get_default_command_line_arguments() -> CommandLineArguments:
platform="auto",
allow_empty=False,
archs=None,
+ only=None,
config_file="",
output_dir=Path("wheelhouse"),
package_dir=Path("."),
feat: add formatted identifier printout
See #1261 - this would enable easier production of dynamic matrix outputs from `cibuildwheel`.
Example:
```console
$ cibuildwheel --print-build-identifiers '{{"CIBW_BUILD": "{identifier}", "CIBW_ARCHS":"{arch}", "os": "ubuntu-latest"}}' --platform linux | jq -s
[
{
"CIBW_BUILD": "cp36-manylinux_x86_64",
"CIBW_ARCHS": "x86_64",
"os": "ubuntu-latest"
},
{
"CIBW_BUILD": "cp37-manylinux_x86_64",
"CIBW_ARCHS": "x86_64",
"os": "ubuntu-latest"
},
...
```
You should be able to concat multiple platforms as well.
Todo:
- [ ] Verify this is a good idea / makes using this easier in #1261.
- [ ] Tests
- [ ] Any other values useful in the format string?
- [ ] Is there any way to use this from the action, so the version stays locked with the cibuildwheel invocation that runs the builds?
- [ ] This does not have a way to group smaller chunks of runs, sadly, but maybe that could be done via jq? I did include `version` if that's useful - so we have `version`, `identifier`, and `arch`. You'd have to combine the contents of the build field. But you could write a processor to do this at least much easier than now.
I could have done this by just processing the build identifier, and not touching `get_build_identifiers` - version, pp/cp, os, and arch are all obtainable by processing the identifier. Maybe that would be better. Edit: trying that in a new commit. Also added `impl` and `os`.
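For reference, a rough sketch of that kind of post-processing (not the PR's implementation; the identifier is an arbitrary example):
```python
identifier = "cp37-manylinux_x86_64"

build_tag, _, platform_tag = identifier.partition("-")
impl = "cp" if build_tag.startswith("cp") else "pp"
version = f"{build_tag[2]}.{build_tag[3:]}"
os_name, _, arch = platform_tag.partition("_")

print(impl, version, os_name, arch)  # cp 3.7 manylinux x86_64
```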
| 2022-04-28T04:22:39 |
|
pypa/cibuildwheel | 1,138 | pypa__cibuildwheel-1138 | [
"1137"
] | aa753429b2fdc380e9665c4f031c7d9998718d7c | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -234,21 +234,20 @@ def selector_matches(patterns: str, string: str) -> bool:
# Once we require Python 3.10+, we can add kw_only=True
@dataclasses.dataclass
-class IdentifierSelector:
+class BuildSelector:
"""
This class holds a set of build/skip patterns. You call an instance with a
build identifier, and it returns True if that identifier should be
included. Only call this on valid identifiers, ones that have at least 2
- numeric digits before the first dash. If a pre-release version X.Y is present,
- you can filter it with prerelease="XY".
+ numeric digits before the first dash.
"""
- # a pattern that skips prerelease versions, when include_prereleases is False.
- PRERELEASE_SKIP: ClassVar[str] = "cp311-*"
-
- skip_config: str
build_config: str
+ skip_config: str
requires_python: Optional[SpecifierSet] = None
+
+ # a pattern that skips prerelease versions, when include_prereleases is False.
+ PRERELEASE_SKIP: ClassVar[str] = "cp311-*"
prerelease_pythons: bool = False
def __call__(self, build_id: str) -> bool:
@@ -272,15 +271,16 @@ def __call__(self, build_id: str) -> bool:
@dataclasses.dataclass
-class BuildSelector(IdentifierSelector):
- pass
+class TestSelector:
+ """
+ A build selector that can only skip tests according to a skip pattern.
+ """
+ skip_config: str
-# Note that requires-python is not needed for TestSelector, as you can't test
-# what you can't build.
[email protected]
-class TestSelector(IdentifierSelector):
- build_config: str = "*"
+ def __call__(self, build_id: str) -> bool:
+ should_skip = selector_matches(self.skip_config, build_id)
+ return not should_skip
# Taken from https://stackoverflow.com/a/107717
| diff --git a/unit_test/build_selector_test.py b/unit_test/build_selector_test.py
--- a/unit_test/build_selector_test.py
+++ b/unit_test/build_selector_test.py
@@ -136,3 +136,14 @@ def test_build_limited_python_patch():
assert build_selector("cp36-manylinux_x86_64")
assert build_selector("cp37-manylinux_x86_64")
+
+
+def test_testing_selector():
+ # local import to avoid pytest trying to collect this as a test class!
+ from cibuildwheel.util import TestSelector
+
+ test_selector = TestSelector(skip_config="cp36-*")
+
+ assert not test_selector("cp36-win_amd64")
+ assert test_selector("cp37-manylinux_x86_64")
+ assert test_selector("cp311-manylinux_x86_64")
| Tests are skipped on pre-release Pythons
### Description
On cibuildwheel==2.6.1, when running with `CIBW_PRERELEASE_PYTHONS=1`, any configured test command is always skipped when targeting a pre-release Python version. A quick glance at the code makes me think it's an unintended consequence of having `TestSelector` inherit the prerelease exclusion behavior from `IdentifierSelector`.
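A simplified stand-in sketch of the suspected mechanism (not the library's actual classes), showing how an inherited prerelease filter would silently skip the test step:
```python
import fnmatch

PRERELEASE_SKIP = "cp311-*"

def test_selector(build_id, skip_config="", prerelease_pythons=False):
    # behaviour inherited from the build selector: the prerelease filter
    # also applies when deciding whether to run tests
    if not prerelease_pythons and fnmatch.fnmatch(build_id, PRERELEASE_SKIP):
        return False
    return not fnmatch.fnmatch(build_id, skip_config)

print(test_selector("cp311-manylinux_x86_64"))  # False -> tests silently skipped
print(test_selector("cp310-manylinux_x86_64"))  # True
```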
### Build log
https://github.com/rolpdog/cffi-mirror/runs/6867587654?check_suite_focus=true
### CI config
https://github.com/rolpdog/cffi-mirror/commit/b7a01156bafba52bd2bfedc1222c9eb8d36491c7#diff-944291df2c9c06359d37cc8833d182d705c9e8c3108e7cfe132d61a06e9133dd
| 2022-06-13T21:44:24 |
|
pypa/cibuildwheel | 1,171 | pypa__cibuildwheel-1171 | [
"1168",
"1169"
] | f349304eb7f9f723a011a6d09cf261bec463a985 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -465,6 +465,23 @@ def build(options: Options, tmp_path: Path) -> None:
# skip this test
continue
+ if testing_arch == "arm64" and config.identifier.startswith("cp38-"):
+ log.warning(
+ unwrap(
+ """
+ While cibuildwheel can build CPython 3.8 universal2/arm64 wheels, we
+ cannot test the arm64 part of them, even when running on an Apple
+ Silicon machine. This is because we use the x86_64 installer of
+ CPython 3.8. See the discussion in
+ https://github.com/pypa/cibuildwheel/pull/1169 for the details. To
+ silence this warning, set `CIBW_TEST_SKIP: cp38-macosx_*:arm64`.
+ """
+ )
+ )
+
+ # skip this test
+ continue
+
log.step(
"Testing wheel..."
if testing_arch == machine_arch
| diff --git a/test/test_macos_archs.py b/test/test_macos_archs.py
--- a/test/test_macos_archs.py
+++ b/test/test_macos_archs.py
@@ -133,3 +133,36 @@ def test_universal2_testing(tmp_path, capfd, skip_arm64_test):
expected_wheels = [w for w in ALL_MACOS_WHEELS if "cp39" in w and "universal2" in w]
assert set(actual_wheels) == set(expected_wheels)
+
+
+def test_cp38_arm64_testing(tmp_path, capfd):
+ if utils.platform != "macos":
+ pytest.skip("this test is only relevant to macos")
+ if get_xcode_version() < (12, 2):
+ pytest.skip("this test only works with Xcode 12.2 or greater")
+ if platform.machine() != "arm64":
+ pytest.skip("this test only works on arm64")
+
+ project_dir = tmp_path / "project"
+ basic_project.generate(project_dir)
+
+ actual_wheels = utils.cibuildwheel_run(
+ project_dir,
+ add_env={
+ "CIBW_BUILD": "cp38-*",
+ "CIBW_TEST_COMMAND": '''python -c "import platform; print('running tests on ' + platform.machine())"''',
+ "CIBW_ARCHS": "x86_64,universal2,arm64",
+ },
+ )
+
+ captured = capfd.readouterr()
+
+ assert "running tests on x86_64" in captured.out
+ assert "running tests on arm64" not in captured.out
+
+ warning_message = "While cibuildwheel can build CPython 3.8 universal2/arm64 wheels, we cannot test the arm64 part of them"
+ assert warning_message in captured.err
+
+ expected_wheels = [w for w in ALL_MACOS_WHEELS if "cp38" in w]
+
+ assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -154,8 +154,8 @@ def expected_wheels(
python_abi_tags += ["pp37-pypy37_pp73", "pp38-pypy38_pp73", "pp39-pypy39_pp73"]
if platform == "macos" and machine_arch == "arm64":
- # currently, arm64 macs are only supported by cp39, cp310 & cp311
- python_abi_tags = ["cp39-cp39", "cp310-cp310", "cp311-cp311"]
+ # arm64 macs are only supported by cp38+
+ python_abi_tags = ["cp38-cp38", "cp39-cp39", "cp310-cp310", "cp311-cp311"]
wheels = []
@@ -193,7 +193,7 @@ def expected_wheels(
platform_tags = ["win32", "win_amd64"]
elif platform == "macos":
- if python_abi_tag == "cp39-cp39" and machine_arch == "arm64":
+ if machine_arch == "arm64":
arm64_macosx_deployment_target = _get_arm64_macosx_deployment_target(
macosx_deployment_target
)
| Fix cp38-macosx universal2 and arm64 Python pkg URL
Previous URLs pointed to x86_64 version
| Unfortunately we don't want to upgrade the bundled Python because it would increase the minimum MacOS version for the x86_64 arch from 10.9 to 11.0, which is a pretty massive jump.
Sorry, could you please explain how the arm64 Python URL affects the x86_64 minimum macOS version?
Ah, I understand what's going on. We still have no arm64 CI, so we've never actually had a chance to test running all this on arm64. The version of 3.8 that we install only runs on x86_64. So when you run it on arm64, it's emulating x86_64. The wheel that you built is fine, but you're not able to test it.
This PR seems to only change the universal2/arm64 build identifiers. I don't think that will change anything, as on macOS you can only have one minor version of Python installed at once, and the first one that cibuildwheel finds, it will install. So if we were going to make this change we'd have to change all three 3.8 URLs.
The Python 3.8 arm64 support was added in https://github.com/pypa/cibuildwheel/pull/704. I don't have time right now but perhaps there are some clues in there why we're not using the `macos11` versions.
We tried very hard to use this when it came out, but it doesn’t work. CPython only has partial support for 3.8 on ARM - it was squeezed in at the last minute in the last binary release. IIRC you can’t produce 10.9-10.15 compatible wheels for Intel from the ARM version since Python didn’t include a 10.9 compatible Intel version in the binary, like they normally do (and do in 3.9+). You can look at the PR where 3.8 was added, I think.
I think to make this “work” we’d need to special case when running on ARM and have the (big) caveat that you can’t build for macOS 10 from AS (but only for 3.8). Is 3.8 still the version Apple ships? I’m traveling and won’t be on my AS machine till the 27th.
Read here:
* https://github.com/pypa/cibuildwheel/pull/663#discussion_r628731708
* https://github.com/scipy/oldest-supported-numpy/pull/20#discussion_r641740734
* https://github.com/pypa/cibuildwheel/pull/704
I think we should see if we can build from AS too, but we haven’t had CI runners for it and it will take some changes, and it should error out if the macOS version is set <11.
I’m pretty sure we use the AS version from this table when building pure AS wheels from Intel.
> IIRC you can’t produce 10.9-10.15 compatible wheels for Intel from the ARM version since Python didn’t include a 10.9 compatible Intel version in the binary, like they normally do (and do in 3.9+).
Now that `MACOSX_DEPLOYMENT_TARGET` is always set, I don't think there's a big issue on what you can target (i.e. will still be 10.9 for x86_64) unless I'm missing something. However, using a macOS 10.x runner to do the build won't be possible anymore with this installer which requires macOS 11+.
From what I understand, you can't set `MACOSX_DEPLOYMENT_TARGET` lower than whatever CPython was prepared with, otherwise we'd not need to download the official binaries at all. I could be wrong, but I'm pretty sure `MACOSX_DEPLOYMENT_TARGET` can only be increased.
yes. here is a `wheel` code fragment with a warning about this:
https://github.com/pypa/wheel/blob/9d4264d2f23ea44dd85639a27fa6b284261921cd/src/wheel/macosx_libfile.py#L411-L419
Thanks all for the informative discussion. I guess the upshot is that we keep the macos10.9 installer, and we can't test 3.8 arm64 wheels, even when running on arm64.
However, we should change the behaviour when running on Apple Silicon, to not try to test the arm64 3.8 wheel, and include an informative message explaining why. | 2022-07-11T17:30:16 |
pypa/cibuildwheel | 1,195 | pypa__cibuildwheel-1195 | [
"1193"
] | fb63392538402c77b519160b7af87b37e66d98c6 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -505,7 +505,7 @@ def build(options: Options, tmp_path: Path) -> None:
# define a custom 'call' function that adds the arch prefix each time
call_with_arch = functools.partial(call, *arch_prefix)
- shell_with_arch = functools.partial(shell, *arch_prefix)
+ shell_with_arch = functools.partial(call, *arch_prefix, "/bin/sh", "-c")
# Use --no-download to ensure determinism by using seed libraries
# built into virtualenv
When testing x86_64 on macOS arm64, `CIBW_TEST_COMMAND` isn't run under Rosetta when using pipes
### Description
So basically, the problem is that the way cibuildwheel runs stuff under Rosetta is by prefixing the command with `arch -x86_64`. This works great for regular subprocess calls *but* when used with `shell=True`, it only prefixes the first command with it:
https://github.com/pypa/cibuildwheel/blob/fb63392538402c77b519160b7af87b37e66d98c6/cibuildwheel/macos.py#L497-L508
So when `CIBW_TEST_COMMAND` is set to something like `false || python -c 'import spam'`, it doesn't work because the latter part of the command isn't run under Rosetta:
```
+ arch -x86_64 false || python -c 'import spam'
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: dlopen(/private/var/folders/0d/08wbx2j52nx5hjg16cshb8g80000gn/T/cibw-run-qbqflk30/cp39-macosx_x86_64/venv-test/lib/python3.9/site-packages/spam.cpython-39-darwin.so, 0x0002): tried: '/private/var/folders/0d/08wbx2j52nx5hjg16cshb8g80000gn/T/cibw-run-qbqflk30/cp39-macosx_x86_64/venv-test/lib/python3.9/site-packages/spam.cpython-39-darwin.so' (mach-o file, but is an incompatible architecture (have (x86_64), need (arm64e)))
```
I'm unsure how this should be resolved, perhaps we should have a simple python script that can run shell commands? Then it would be possible to do `arch -x86_64 python -m cibuildwheel._run_shell_command "false || python -c 'import spam'"` and I think Rosetta emulation would then apply to everything that's run there?
---
The linked build log and CI config are for Cirrus CI, which I'm adding support for in #1191, as it is the only available CI that has M1 runners and I don't have any macOS system myself. The issue is not related to the CI service used at all, though, so that should not matter here.
### Build log
https://cirrus-ci.com/task/4706399904071680?logs=run_cibuildwheel#L660
### CI config
https://raw.githubusercontent.com/jack1142/cibuildwheel/25d1b927d828047928aea2c6fb7e9dd0eab42fc8/.cirrus.yml
Ah yes, good catch! It looks to me like the solution could be
https://github.com/pypa/cibuildwheel/blob/fb63392538402c77b519160b7af87b37e66d98c6/cibuildwheel/macos.py#L508
```
shell_with_arch = functools.partial(call, *arch_prefix, "sh", "-c")
``` | 2022-07-25T14:21:09 |
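With that change, the whole script rides behind the `arch` prefix. Roughly (an illustration only; it requires a macOS arm64 machine with Rosetta installed):
```python
import subprocess

arch_prefix = ["arch", "-x86_64"]
script = "false || python -c 'import platform; print(platform.machine())'"

# The script is one argument to /bin/sh, so every command in it, including
# the part after `||`, runs under the x86_64 arch prefix.
subprocess.run([*arch_prefix, "/bin/sh", "-c", script], check=True)
```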
|
pypa/cibuildwheel | 1,205 | pypa__cibuildwheel-1205 | [
"761"
] | c73152918723802fb39c9e9544e6dbf840845591 | diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import difflib
import functools
import os
import sys
@@ -188,14 +189,10 @@ def __init__(
# Validate project config
for option_name in config_options:
- if not self._is_valid_global_option(option_name):
- raise ConfigOptionError(f'Option "{option_name}" not supported in a config file')
+ self._validate_global_option(option_name)
for option_name in config_platform_options:
- if not self._is_valid_platform_option(option_name):
- raise ConfigOptionError(
- f'Option "{option_name}" not supported in the "{self.platform}" section'
- )
+ self._validate_platform_option(option_name)
self.config_options = config_options
self.config_platform_options = config_platform_options
@@ -207,40 +204,51 @@ def __init__(
if config_overrides is not None:
if not isinstance(config_overrides, list):
- raise ConfigOptionError('"tool.cibuildwheel.overrides" must be a list')
+ raise ConfigOptionError("'tool.cibuildwheel.overrides' must be a list")
for config_override in config_overrides:
select = config_override.pop("select", None)
if not select:
- raise ConfigOptionError('"select" must be set in an override')
+ raise ConfigOptionError("'select' must be set in an override")
if isinstance(select, list):
select = " ".join(select)
self.overrides.append(Override(select, config_override))
- def _is_valid_global_option(self, name: str) -> bool:
+ def _validate_global_option(self, name: str) -> None:
"""
- Returns True if an option with this name is allowed in the
+ Raises an error if an option with this name is not allowed in the
[tool.cibuildwheel] section of a config file.
"""
allowed_option_names = self.default_options.keys() | PLATFORMS | {"overrides"}
- return name in allowed_option_names
+ if name not in allowed_option_names:
+ msg = f"Option {name!r} not supported in a config file."
+ matches = difflib.get_close_matches(name, allowed_option_names, 1, 0.7)
+ if matches:
+ msg += f" Perhaps you meant {matches[0]!r}?"
+ raise ConfigOptionError(msg)
- def _is_valid_platform_option(self, name: str) -> bool:
+ def _validate_platform_option(self, name: str) -> None:
"""
- Returns True if an option with this name is allowed in the
+ Raises an error if an option with this name is not allowed in the
[tool.cibuildwheel.<current-platform>] section of a config file.
"""
disallowed_platform_options = self.disallow.get(self.platform, set())
if name in disallowed_platform_options:
- return False
+ msg = f"{name!r} is not allowed in {disallowed_platform_options}"
+ raise ConfigOptionError(msg)
allowed_option_names = self.default_options.keys() | self.default_platform_options.keys()
- return name in allowed_option_names
+ if name not in allowed_option_names:
+ msg = f"Option {name!r} not supported in the {self.platform!r} section"
+ matches = difflib.get_close_matches(name, allowed_option_names, 1, 0.7)
+ if matches:
+ msg += f" Perhaps you meant {matches[0]!r}?"
+ raise ConfigOptionError(msg)
def _load_file(self, filename: Path) -> tuple[dict[str, Any], dict[str, Any]]:
"""
@@ -290,7 +298,8 @@ def get(
"""
if name not in self.default_options and name not in self.default_platform_options:
- raise ConfigOptionError(f"{name} must be in cibuildwheel/resources/defaults.toml file")
+ msg = f"{name!r} must be in cibuildwheel/resources/defaults.toml file to be accessed."
+ raise ConfigOptionError(msg)
# Environment variable form
envvar = f"CIBW_{name.upper().replace('-', '_')}"
@@ -314,12 +323,12 @@ def get(
if isinstance(result, dict):
if table is None:
- raise ConfigOptionError(f"{name} does not accept a table")
+ raise ConfigOptionError(f"{name!r} does not accept a table")
return table["sep"].join(table["item"].format(k=k, v=v) for k, v in result.items())
if isinstance(result, list):
if sep is None:
- raise ConfigOptionError(f"{name} does not accept a list")
+ raise ConfigOptionError(f"{name!r} does not accept a list")
return sep.join(result)
if isinstance(result, int):
@@ -393,7 +402,7 @@ def globals(self) -> GlobalOptions:
container_engine_str = self.reader.get("container-engine")
if container_engine_str not in ["docker", "podman"]:
- msg = f"cibuildwheel: Unrecognised container_engine '{container_engine_str}', only 'docker' and 'podman' are supported"
+ msg = f"cibuildwheel: Unrecognised container_engine {container_engine_str!r}, only 'docker' and 'podman' are supported"
print(msg, file=sys.stderr)
sys.exit(2)
@@ -437,7 +446,7 @@ def build_options(self, identifier: str | None) -> BuildOptions:
elif build_frontend_str == "pip":
build_frontend = "pip"
else:
- msg = f"cibuildwheel: Unrecognised build frontend '{build_frontend_str}', only 'pip' and 'build' are supported"
+ msg = f"cibuildwheel: Unrecognised build frontend {build_frontend_str!r}, only 'pip' and 'build' are supported"
print(msg, file=sys.stderr)
sys.exit(2)
@@ -445,7 +454,7 @@ def build_options(self, identifier: str | None) -> BuildOptions:
environment = parse_environment(environment_config)
except (EnvironmentParseError, ValueError):
print(
- f'cibuildwheel: Malformed environment option "{environment_config}"',
+ f"cibuildwheel: Malformed environment option {environment_config!r}",
file=sys.stderr,
)
traceback.print_exc(None, sys.stderr)
| diff --git a/unit_test/options_toml_test.py b/unit_test/options_toml_test.py
--- a/unit_test/options_toml_test.py
+++ b/unit_test/options_toml_test.py
@@ -160,9 +160,28 @@ def test_unexpected_key(tmp_path):
"""
)
- with pytest.raises(ConfigOptionError):
+ with pytest.raises(ConfigOptionError) as excinfo:
OptionsReader(pyproject_toml, platform="linux")
+ assert "repair-wheel-command" in str(excinfo.value)
+
+
+def test_underscores_in_key(tmp_path):
+ # Note that platform contents are only checked when running
+ # for that platform.
+ pyproject_toml = tmp_path / "pyproject.toml"
+ pyproject_toml.write_text(
+ """
+[tool.cibuildwheel]
+repair_wheel_command = "repair-project-linux"
+"""
+ )
+
+ with pytest.raises(ConfigOptionError) as excinfo:
+ OptionsReader(pyproject_toml, platform="linux")
+
+ assert "repair-wheel-command" in str(excinfo.value)
+
def test_unexpected_table(tmp_path):
pyproject_toml = tmp_path / "pyproject.toml"
| Consider auto-correcting underscores to dashes in TOML options
From https://github.com/pypa/cibuildwheel/pull/759#discussion_r670922746
While TOML prefers dashes, it's easy for a Python programmer to accidentally write underscores for option names.
I wonder if it would be nice to autofix this a la `option_name.replace("_","-")` after we read these. pip does something similar - if you do e.g. `pip install python_dateutil`, it will still find `python-dateutil`, because underscores are invalid.
| I'm not really a fan of this - I'd rather force "one correct" way to do it, rather than making it arbitrary. You'll immediately get an error if you put something in incorrectly, so I think it's fairly low friction. And then what do you do if `a_b` and `a-b` are both present? Normally the TOML parser should handle the errors if you duplicate a key (or eventually a linter), but this is not a TOML error, so we'd have to handle it.
Hmm. yes. Perhaps just a nicer error message, like `Unknown option "before_all". Perhaps you meant "before-all"?`
I really like it when Python 3.10 does this, and it's pretty easy to implement with `difflib.get_close_matches`, so I like this idea. Also would help for near misspellings. | 2022-08-01T06:08:19 |
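For example, the suggestion message can be produced in a couple of lines (the option names here are just a small sample):
```python
import difflib

allowed = ["before-all", "before-build", "repair-wheel-command"]
name = "before_all"

matches = difflib.get_close_matches(name, allowed, 1, 0.7)
msg = f"Unknown option {name!r}."
if matches:
    msg += f" Perhaps you meant {matches[0]!r}?"
print(msg)  # Unknown option 'before_all'. Perhaps you meant 'before-all'?
```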
pypa/cibuildwheel | 1,217 | pypa__cibuildwheel-1217 | [
"1204"
] | 35c795e4c6fcb627dc1b07998ee28f3310f9a768 | diff --git a/cibuildwheel/architecture.py b/cibuildwheel/architecture.py
--- a/cibuildwheel/architecture.py
+++ b/cibuildwheel/architecture.py
@@ -66,10 +66,6 @@ def auto_archs(platform: PlatformName) -> set[Architecture]:
if platform == "windows" and native_architecture == Architecture.AMD64:
result.add(Architecture.x86)
- if platform == "macos" and native_architecture == Architecture.arm64:
- # arm64 can build and test both archs of a universal2 wheel.
- result.add(Architecture.universal2)
-
return result
@staticmethod
| diff --git a/test/test_macos_archs.py b/test/test_macos_archs.py
--- a/test/test_macos_archs.py
+++ b/test/test_macos_archs.py
@@ -11,7 +11,7 @@
ALL_MACOS_WHEELS = {
*utils.expected_wheels("spam", "0.1.0", machine_arch="x86_64"),
- *utils.expected_wheels("spam", "0.1.0", machine_arch="arm64"),
+ *utils.expected_wheels("spam", "0.1.0", machine_arch="arm64", include_universal2=True),
}
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -116,6 +116,7 @@ def expected_wheels(
macosx_deployment_target="10.9",
machine_arch=None,
python_abi_tags=None,
+ include_universal2=False,
):
"""
Returns a list of expected wheels from a run of cibuildwheel.
@@ -199,14 +200,14 @@ def expected_wheels(
arm64_macosx_deployment_target = _get_arm64_macosx_deployment_target(
macosx_deployment_target
)
- platform_tags = [
- f'macosx_{macosx_deployment_target.replace(".", "_")}_universal2',
- f'macosx_{arm64_macosx_deployment_target.replace(".", "_")}_arm64',
- ]
+ platform_tags = [f'macosx_{arm64_macosx_deployment_target.replace(".", "_")}_arm64']
else:
- platform_tags = [
- f'macosx_{macosx_deployment_target.replace(".", "_")}_x86_64',
- ]
+ platform_tags = [f'macosx_{macosx_deployment_target.replace(".", "_")}_x86_64']
+
+ if include_universal2:
+ platform_tags.append(
+ f'macosx_{macosx_deployment_target.replace(".", "_")}_universal2',
+ )
else:
raise Exception("unsupported platform")
| Should arm64 runners build both universal2 and arm64 wheels by default?
Continuing the discussion from https://github.com/pypa/cibuildwheel/pull/1191#discussion_r928749039
The current situation is that arm64 runners build universal2 and arm64 by default. This discussion is about whether we change that to just arm64.
> @joerick: The question of whether to default building both universal2 and arm64 on the arm runner is still open, in my mind. I'm still mostly of the opinion that building both is good, because universal2 packages are preferred in some contexts (e.g. distributable GUI apps), and that most packages are small and the extra build time isn't much of a hit. Of course, people building big libraries would be wise to change this to just build arm64.
> @henryiii: Personally, I think the default should be native only. Eventually no one will care about Intel / universal and will only produce native binaries again. Also, there are still workarounds related to cross compiling for universal (like requiring all dependencies to also be universal). And we don’t support it yet, but there are two ways to make a universal binary, the default way and merging two native binaries. And finally universal vs. native is a choice, and if you pick universal, you don’t need native. You can always add tags for the native archs to a universal binary. And pip won’t ever download universal if both native wheels exist.
>
> So I’d recommend native being default, and only producing native binaries with it.
Let's get a decision on this before we release CirrusCI support, which would be our first officially supported arm64 CI.
| Notably, numpy doesn't distribute universal2 wheels currently. There's some discussion about that here: https://github.com/numpy/numpy/pull/20787
Regarding wheel fusing (I had to look this up) - multibuild's implementation is here:
https://github.com/multi-build/multibuild/blob/10d9a4c9663759e194495e87eb84f96f7cb513e9/osx_utils.sh#L508-L525
It uses a tool built into delocate called [delocate-fuse](https://github.com/matthew-brett/delocate/blob/master/delocate/fuse.py), called like `delocate-fuse <x86_64_wheel> <arm64_wheel> -w <outdir>`.
I don't know how well this works; it seems like there _could_ be cases where this fails (particularly, I'm wondering what happens when we hit the `# existing not-lib file not identical to source` comment). But, given this is part of multibuild, it must have gotten a good amount of testing, and delocate has always been solid.
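For anyone wanting to try that outside multibuild, a rough sketch of driving it the same way (the wheel filenames are placeholders, and it assumes `delocate` is installed so the `delocate-fuse` command exists):
```py
import subprocess

# fuse a thin x86_64 wheel and a thin arm64 wheel, exactly as the multibuild
# snippet above calls it: delocate-fuse <x86_64 wheel> <arm64 wheel> -w <outdir>
subprocess.run(
    [
        "delocate-fuse",
        "dist/example-1.0-cp39-cp39-macosx_10_9_x86_64.whl",
        "dist/example-1.0-cp39-cp39-macosx_11_0_arm64.whl",
        "-w", "dist/fused",
    ],
    check=True,
)
```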
The other thing that's on my mind is whether we're even doing the universal2 thing at the right stage of the packaging process. As we know, universal2 wheels are ignored by pip when a native one is present. Packaged macOS apps are a pretty small percentage of Python deployment. If fusing is indeed possible, do we really need the universal2 wheels on PyPI at all? Can we push wheel fusing down the process to a distribution tool like py2app or pyinstaller? As @mattip says in the numpy thread linked above:
> @mattip: As I understand things, app packagers would still need to have the option to fuse wheels for projects that do not produce universal2 wheels, so I think the proper place for all that to happen is when packaging apps.
I suppose the only downside to this is that project-built universal2 wheels have the chance to be tested before release, whereas I might not be as confident in using a universal2 wheel that was fused 'in the wild'. I suppose that depends on how reliable the 'fuse' process is.
A final thought - is wheel fusing the only option for macOS packagers? Could they build/distribute separate x86_64 and arm64 versions?
> Can we push wheel fusing down the process to the packaging tool like py2app or pyinstaller?
+1 from me, until there is a real user-side demand for universal wheels.
IMO, this means (for cibuildwheel) that we should never build `universal2` by default, but leave it up to a user choice. So AS runners should try to build AS wheels by default, like Intel runners and Intel wheels. We could add a fuse option eventually, which would also allow us to test the fused wheel, but that's very much extra. Users can use delocate-fuse today if they want to - especially if they build on both Intel and Apple Silicon runners. Users can also just not ship Universal2 if they ship both. Maybe packaging could eventually fuse after the fact if they needed both.
PS: I would also recommend retagging a universal2 wheel like `macosx_10_10_universal2.macosx_10_10_x86_64.macosx_11_0_arm64.macosx_11_0_universal2` which allows a single wheel to also work with older pips, too. The tooling for that would be in https://github.com/pypa/wheel/pull/422 but wheel development is a bit slow at the moment.
What are "AS runners" and "AS wheels"? The plan sounds reasonable in any case.
AS is probably short for Apple Silicon. | 2022-08-06T09:22:49 |
pypa/cibuildwheel | 1,226 | pypa__cibuildwheel-1226 | [
"1221"
] | 6549a90991c8d1fe135402fede048bc9d9572e54 | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -242,7 +242,7 @@ class BuildSelector:
requires_python: SpecifierSet | None = None
# a pattern that skips prerelease versions, when include_prereleases is False.
- PRERELEASE_SKIP: ClassVar[str] = "cp311-*"
+ PRERELEASE_SKIP: ClassVar[str] = ""
prerelease_pythons: bool = False
def __call__(self, build_id: str) -> bool:
| diff --git a/unit_test/build_selector_test.py b/unit_test/build_selector_test.py
--- a/unit_test/build_selector_test.py
+++ b/unit_test/build_selector_test.py
@@ -11,7 +11,7 @@ def test_build():
assert build_selector("cp36-manylinux_x86_64")
assert build_selector("cp37-manylinux_x86_64")
assert build_selector("cp310-manylinux_x86_64")
- assert not build_selector("cp311-manylinux_x86_64")
+ assert build_selector("cp311-manylinux_x86_64")
assert build_selector("pp36-manylinux_x86_64")
assert build_selector("pp37-manylinux_x86_64")
assert build_selector("cp36-manylinux_i686")
@@ -30,7 +30,7 @@ def test_build():
assert build_selector("cp36-win_amd64")
assert build_selector("cp37-win_amd64")
assert build_selector("cp310-win_amd64")
- assert not build_selector("cp311-win_amd64")
+ assert build_selector("cp311-win_amd64")
assert not build_selector("pp36-win_amd64")
assert not build_selector("pp37-win_amd64")
diff --git a/unit_test/linux_build_steps_test.py b/unit_test/linux_build_steps_test.py
--- a/unit_test/linux_build_steps_test.py
+++ b/unit_test/linux_build_steps_test.py
@@ -60,8 +60,12 @@ def before_alls(step):
pprint(build_steps)
assert build_steps[0].container_image == "normal_container_image"
- assert identifiers(build_steps[0]) == ["cp36-manylinux_x86_64", "cp37-manylinux_x86_64"]
- assert before_alls(build_steps[0]) == ["", ""]
+ assert identifiers(build_steps[0]) == [
+ "cp36-manylinux_x86_64",
+ "cp37-manylinux_x86_64",
+ "cp311-manylinux_x86_64",
+ ]
+ assert before_alls(build_steps[0]) == ["", "", ""]
assert build_steps[1].container_image == "other_container_image"
assert identifiers(build_steps[1]) == ["cp38-manylinux_x86_64", "cp310-manylinux_x86_64"]
diff --git a/unit_test/option_prepare_test.py b/unit_test/option_prepare_test.py
--- a/unit_test/option_prepare_test.py
+++ b/unit_test/option_prepare_test.py
@@ -13,7 +13,7 @@
from cibuildwheel import linux, util
from cibuildwheel.__main__ import main
-ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "pp37", "pp38", "pp39"}
+ALL_IDS = {"cp36", "cp37", "cp38", "cp39", "cp310", "cp311", "pp37", "pp38", "pp39"}
@pytest.fixture
@@ -137,7 +137,8 @@ def test_build_with_override_launches(mock_build_container, monkeypatch, tmp_pat
identifiers = {x.identifier for x in kwargs["platform_configs"]}
assert identifiers == {
- f"{x}-manylinux_x86_64" for x in ALL_IDS - {"cp36", "cp310", "pp37", "pp38", "pp39"}
+ f"{x}-manylinux_x86_64"
+ for x in ALL_IDS - {"cp36", "cp310", "cp311", "pp37", "pp38", "pp39"}
}
assert kwargs["options"].build_options("cp37-manylinux_x86_64").before_all == ""
@@ -147,10 +148,7 @@ def test_build_with_override_launches(mock_build_container, monkeypatch, tmp_pat
assert not kwargs["container"]["simulate_32_bit"]
identifiers = {x.identifier for x in kwargs["platform_configs"]}
assert identifiers == {
- "cp310-manylinux_x86_64",
- "pp37-manylinux_x86_64",
- "pp38-manylinux_x86_64",
- "pp39-manylinux_x86_64",
+ f"{x}-manylinux_x86_64" for x in {"cp310", "cp311", "pp37", "pp38", "pp39"}
}
kwargs = build_in_container.call_args_list[3][1]
| Enable building on Python 3.11 without CIBW_PRERELEASE_PYTHONS
### Description
The docs at https://cibuildwheel.readthedocs.io/en/stable/options/#prerelease-pythons say:
> Once Python is ABI stable and enters the release candidate phase, that version of Python will become available without this flag.
Now that Python 3.11.0rc1 has been released, should Python 3.11 be included in regular wheel builds?
(My motivation for the issue: I'm planning to make a package release later this week, and I'd like to include wheels for Python 3.11; I'm wondering whether the best way forward is to set `CIBW_PRERELEASE_PYTHONS` or not.)
### Build log
_No response_
### CI config
_No response_
| the manylinux image with 3.11.0rc1 was uploaded two hours ago and only for [manylinux2014_x86_64](https://quay.io/repository/pypa/manylinux2014_x86_64), so there is a need to wait until all docker images are updated before changing from 3.11.0b5 to 3.11.0rc1 in cibuildwheel.
@Czaki Thanks for the update! Sounds like this is all in progress, and this issue was redundant - apologies for the noise. Should I just close this?
As it is open, it can be closed when the new version is released; keeping it open until then increases discoverability (people check open issues more often than closed ones).
I believe the rc1 builds finished last night, so a manual run of https://github.com/pypa/cibuildwheel/actions/workflows/update-dependencies.yml should get the rc1 image. | 2022-08-11T10:50:03 |
pypa/cibuildwheel | 1,273 | pypa__cibuildwheel-1273 | [
"1271"
] | 00b2600cca381be796f3755f9c38065dfbf8c3b1 | diff --git a/cibuildwheel/environment.py b/cibuildwheel/environment.py
--- a/cibuildwheel/environment.py
+++ b/cibuildwheel/environment.py
@@ -4,6 +4,7 @@
from typing import Any, Mapping, Sequence
import bashlex
+import bashlex.errors
from cibuildwheel.typing import Protocol
@@ -33,7 +34,11 @@ def split_env_items(env_string: str) -> list[str]:
if not env_string:
return []
- command_node = bashlex.parsesingle(env_string)
+ try:
+ command_node = bashlex.parsesingle(env_string)
+ except bashlex.errors.ParsingError as e:
+ raise EnvironmentParseError(env_string) from e
+
result = []
for word_node in command_node.parts:
diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -10,7 +10,7 @@
from contextlib import contextmanager
from dataclasses import asdict, dataclass
from pathlib import Path
-from typing import Any, Dict, Generator, Iterator, List, Mapping, Union, cast
+from typing import Any, Callable, Dict, Generator, Iterator, List, Mapping, Union, cast
if sys.version_info >= (3, 11):
import tomllib
@@ -23,7 +23,7 @@
from .environment import EnvironmentParseError, ParsedEnvironment, parse_environment
from .oci_container import ContainerEngine
from .projectfiles import get_requires_python_str
-from .typing import PLATFORMS, Literal, PlatformName, TypedDict
+from .typing import PLATFORMS, Literal, NotRequired, PlatformName, TypedDict
from .util import (
MANYLINUX_ARCHS,
MUSLLINUX_ARCHS,
@@ -123,6 +123,7 @@ class Override:
class TableFmt(TypedDict):
item: str
sep: str
+ quote: NotRequired[Callable[[str], str]]
class ConfigOptionError(KeyError):
@@ -329,7 +330,7 @@ def get(
if table is None:
raise ConfigOptionError(f"{name!r} does not accept a table")
return table["sep"].join(
- item for k, v in result.items() for item in _inner_fmt(k, v, table["item"])
+ item for k, v in result.items() for item in _inner_fmt(k, v, table)
)
if isinstance(result, list):
@@ -343,14 +344,16 @@ def get(
return result
-def _inner_fmt(k: str, v: Any, table_item: str) -> Iterator[str]:
+def _inner_fmt(k: str, v: Any, table: TableFmt) -> Iterator[str]:
+ quote_function = table.get("quote", lambda a: a)
+
if isinstance(v, list):
for inner_v in v:
- qv = shlex.quote(inner_v)
- yield table_item.format(k=k, v=qv)
+ qv = quote_function(inner_v)
+ yield table["item"].format(k=k, v=qv)
else:
- qv = shlex.quote(v)
- yield table_item.format(k=k, v=qv)
+ qv = quote_function(v)
+ yield table["item"].format(k=k, v=qv)
class Options:
@@ -449,13 +452,13 @@ def build_options(self, identifier: str | None) -> BuildOptions:
build_frontend_str = self.reader.get("build-frontend", env_plat=False)
environment_config = self.reader.get(
- "environment", table={"item": "{k}={v}", "sep": " "}
+ "environment", table={"item": '{k}="{v}"', "sep": " "}
)
environment_pass = self.reader.get("environment-pass", sep=" ").split()
before_build = self.reader.get("before-build", sep=" && ")
repair_command = self.reader.get("repair-wheel-command", sep=" && ")
config_settings = self.reader.get(
- "config-settings", table={"item": "{k}={v}", "sep": " "}
+ "config-settings", table={"item": "{k}={v}", "sep": " ", "quote": shlex.quote}
)
dependency_versions = self.reader.get("dependency-versions")
diff --git a/cibuildwheel/typing.py b/cibuildwheel/typing.py
--- a/cibuildwheel/typing.py
+++ b/cibuildwheel/typing.py
@@ -10,6 +10,10 @@
else:
from typing import Final, Literal, OrderedDict, Protocol, TypedDict
+if sys.version_info < (3, 11):
+ from typing_extensions import NotRequired
+else:
+ from typing import NotRequired
__all__ = (
"Final",
@@ -26,6 +30,7 @@
"OrderedDict",
"Union",
"assert_never",
+ "NotRequired",
)
| diff --git a/unit_test/options_test.py b/unit_test/options_test.py
--- a/unit_test/options_test.py
+++ b/unit_test/options_test.py
@@ -1,11 +1,14 @@
from __future__ import annotations
+import os
import platform as platform_module
import textwrap
+from pathlib import Path
import pytest
from cibuildwheel.__main__ import get_build_identifiers
+from cibuildwheel.bashlex_eval import local_environment_executor
from cibuildwheel.environment import parse_environment
from cibuildwheel.options import Options, _get_pinned_container_images
@@ -59,7 +62,7 @@ def test_options_1(tmp_path, monkeypatch):
default_build_options = options.build_options(identifier=None)
- assert default_build_options.environment == parse_environment("FOO=BAR")
+ assert default_build_options.environment == parse_environment('FOO="BAR"')
all_pinned_container_images = _get_pinned_container_images()
pinned_x86_64_container_image = all_pinned_container_images["x86_64"]
@@ -119,30 +122,75 @@ def test_passthrough_evil(tmp_path, monkeypatch, env_var_value):
assert parsed_environment.as_dictionary(prev_environment={}) == {"ENV_VAR": env_var_value}
+xfail_env_parse = pytest.mark.xfail(
+ raises=SystemExit, reason="until we can figure out the right way to quote these values"
+)
+
+
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
- '"value wrapped in quotes"',
- 'an unclosed double-quote: "',
+ pytest.param('"value wrapped in quotes"', marks=[xfail_env_parse]),
+ pytest.param('an unclosed double-quote: "', marks=[xfail_env_parse]),
"string\nwith\ncarriage\nreturns\n",
- "a trailing backslash \\",
+ pytest.param("a trailing backslash \\", marks=[xfail_env_parse]),
],
)
def test_toml_environment_evil(tmp_path, monkeypatch, env_var_value):
args = get_default_command_line_arguments()
args.package_dir = tmp_path
- with tmp_path.joinpath("pyproject.toml").open("w") as f:
- f.write(
- textwrap.dedent(
- f"""\
- [tool.cibuildwheel.environment]
- EXAMPLE='''{env_var_value}'''
- """
- )
+ tmp_path.joinpath("pyproject.toml").write_text(
+ textwrap.dedent(
+ f"""\
+ [tool.cibuildwheel.environment]
+ EXAMPLE='''{env_var_value}'''
+ """
)
+ )
options = Options(platform="linux", command_line_arguments=args)
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"EXAMPLE": env_var_value}
+
+
[email protected](
+ "toml_assignment,result_value",
+ [
+ ('TEST_VAR="simple_value"', "simple_value"),
+ # spaces
+ ('TEST_VAR="simple value"', "simple value"),
+ # env var
+ ('TEST_VAR="$PARAM"', "spam"),
+ ('TEST_VAR="$PARAM $PARAM"', "spam spam"),
+ # env var extension
+ ('TEST_VAR="before:$PARAM:after"', "before:spam:after"),
+ # env var extension with spaces
+ ('TEST_VAR="before $PARAM after"', "before spam after"),
+ # literal $ - this test is just for reference, I'm not sure if this
+ # syntax will work if we change the TOML quoting behaviour
+ (r'TEST_VAR="before\\$after"', "before$after"),
+ ],
+)
+def test_toml_environment_quoting(tmp_path: Path, toml_assignment, result_value):
+ args = get_default_command_line_arguments()
+ args.package_dir = tmp_path
+
+ tmp_path.joinpath("pyproject.toml").write_text(
+ textwrap.dedent(
+ f"""\
+ [tool.cibuildwheel.environment]
+ {toml_assignment}
+ """
+ )
+ )
+
+ options = Options(platform="linux", command_line_arguments=args)
+ parsed_environment = options.build_options(identifier=None).environment
+ environment_values = parsed_environment.as_dictionary(
+ prev_environment={**os.environ, "PARAM": "spam"},
+ executor=local_environment_executor,
+ )
+
+ assert environment_values["TEST_VAR"] == result_value
| `sh: No such file or directory` in `before_all` for linux builds with cibuildwheel v2.10.0
### Description
After updating from `2.9.0` to `v2.10.0`, I get the following error for linux builds using a simple `before-all` command like `before-all = "ls"`:
```
Running before_all...
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ sh -c ls
env: sh: No such file or directory
```
This seems to be related to the fact that we're updating the `PATH` environment variable:
```
[tool.cibuildwheel]
before-all = "ls"
environment = { PATH="$PATH:/usr/local/bin" }
```
| The host `$PATH` isn't valid inside the container. So it must have been expanded inside the container. Hmm, what I expect is happening is `shlex.quote` is protecting the envvars there; we added that to have better string quoting in https://github.com/pypa/cibuildwheel/pull/1244, didn't realize it would change envvar usage
@joerick two easy options:
We could call `os.path.expandvars` inside the container, which would expand these usages, but unfortunately, would ignore quoting protection choices (which is also fortunate, actually, since shlex.quote is adding single quotes - it just means there would be no way to pass a string with a valid environment variable as a string. Though, thinking about it, you might be able to use PASSENV in that case).
We could remove the quoting, and just trust users not to produce malformed input. It wasn't supported before 2.10 anyway.
A final "option" that I'm not calling an option would be to restructure this so it is not passed through as strings, but is passed through as a dict (making the toml version the "true" method and strings just a way to build that dict). But this is a larger architectural change.
Quick example:
```py
>>> shlex.quote("$HOME")
"'$HOME'"
```
This is quoted on the host, but needs to be expanded on the target.
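Following on from that, a quick sketch of why the `os.path.expandvars` idea above gets awkward (output shown for a host where `HOME=/root`, so treat it as illustrative): it expands variables with no regard for shell quoting, so the single quotes that `shlex.quote` adds don't protect anything, and there'd be no way left to pass a literal `$`.
```py
>>> import os, shlex
>>> quoted = shlex.quote("$HOME")
>>> quoted
"'$HOME'"
>>> os.path.expandvars(quoted)               # quoting is ignored, the variable expands anyway
"'/root'"
>>> os.path.expandvars("$NOT_SET_ANYWHERE")  # unknown names pass through untouched
'$NOT_SET_ANYWHERE'
```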
I wonder if this is related to why https://github.com/scikit-build/cmake-python-distributions/pull/282 is breaking. I don't see an expansion, but maybe the quoting is not working as expected: https://github.com/scikit-build/cmake-python-distributions/blob/d8404ad6bcb294f2b95b0ceba26c9b35d248706f/pyproject.toml#L42
The only thing unique to that pair of jobs is the part: `-DRUN_CMAKE_TEST_EXCLUDE:STRING='BootstrapTest|ExportImport|RunCMake.install|RunCMake.file-GET_RUNTIME_DEPENDENCIES'`
Ah. Hmm. So it seems to be that the problem is that unlike the config-settings option, we're not calling `shlex.split` before these values are processed further. `shlex.split` would remove the quotes that `shlex.quote` added. But we use bashlex to parse these options, when the bashlex parser sees a single-quote, it's getting more from that than just tokenisation. It changes how env vars are processed.
Could we call `shlex.split`?
I think we can't - we can't call `shlex.split` on these values because they will sometimes contain single-quotes that really do mean something.
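A quick sketch of what I mean, using a deliberately single-quoted value:
```py
>>> import shlex
>>> shlex.split("'this is not expanded: $PATH'")
['this is not expanded: $PATH']
```
The single quotes are gone after splitting, so a later bash-style pass would happily expand `$PATH`, which is exactly what that quoting was trying to prevent.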
So, let's think what we want...
1.
```toml
environment = { PATH="$PATH:/usr/local/bin" }
```
should translate to `PATH="$PATH:/usr/local/bin"` (or `PATH=$PATH:/usr/local/bin`)
2.
```toml
environment = { SOME_STRING="a string with spaces" }
```
should translate to `SOME_STRING="a string with spaces"` (or `SOME_STRING='a string with spaces'`)
3.
```toml
environment = { SOME_STRING="an evil string with\" some strange punctuation" }
```
should translate to `SOME_STRING="an evil string with\" some strange punctuation"`
4.
```toml
environment = { LITERAL_PASSTHROUGH="'this is not expanded: $PATH'" }
```
should translate to `LITERAL_PASSTHROUGH='this is not expanded: $PATH'`
Though 3 might not be a huge priority... I'm not even sure I care about 4. much, tbh.
One idea I had was, when parsing the option from TOML, we could run `shlex.split` on it at option parse time, and see if it is more than one token. If it is, we run `shlex.quote` on it, to ensure that it's a valid env var assignment. Otherwise we don't quote these values at all.
☝️ I _think_ this might be a good approach - it gives users the most control - if they specify one token (i.e. get their bash quoting right) then cibuildwheel doesn't touch it. Only if the option value would form an invalid bash assignment do things get quoted. We _could_ raise a warning in this case, too?
~Or, the other option would be to just raise an error if the value is more than one bash token, and get the user to fix their config~ Nah, based on how you're using it above this would be pretty user-hostile I feel!
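To make that concrete, a minimal sketch of the idea (the helper name is made up, this isn't the real option-parsing code):
```py
import shlex

def normalise_env_assignment_value(value: str) -> str:
    # if the value is already a single bash token, the user's quoting is
    # respected and the value passes through untouched
    if len(shlex.split(value)) <= 1:
        return value
    # otherwise quote it so that VAR=<value> still forms a valid assignment
    # (this is also where a warning could be emitted)
    return shlex.quote(value)

print(normalise_env_assignment_value("$PATH:/usr/local/bin"))  # unchanged
print(normalise_env_assignment_value("a string with spaces"))  # -> 'a string with spaces'
```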
@henryiii and I have just been chatting on Discord. There is some concern that my proposed fix above would make the quoting behaviour even more confusing than it was before (which I kinda agree with!)
Based on @henryiii's build logs above, it also seems like there might be an incompatibility between shlex and bashlex somewhere:
```
>>> import shlex
>>> shlex.quote("a string with 'single quotes'")
'\'a string with \'"\'"\'single quotes\'"\'"\'\''
>>> shlex.split(shlex.quote("a string with 'single quotes'"))
["a string with 'single quotes'"]
>>> import bashlex
>>> list(bashlex.split(shlex.quote("a string with 'single quotes'")))
['a string with \'"\'"\'single quotes\'"\'"\'']
```
We don't actually use bashlex.split anywhere though, so maybe it's not actually a problem. Needs more investigation!
> We could call `os.path.expandvars` inside the container, which would expand these usages, but unfortunately, would ignore quoting protection choices (which is also fortunate, actually, since shlex.quote is adding single quotes - it just means there would be no way to pass a string with a valid environment variable as a string. Though, thinking about it, you might be able to use PASSENV in that case).
I think this is only papering over the problem... correct quoting is also required for the bash parser to do command substitution with `$()` or backticks. It could also make it impossible to pass a literal `$` in an option.
> We could remove the quoting, and just trust users not to produce malformed input. It wasn't supported before 2.10 anyway.
Yeah, this is what I'm leaning towards, at least temporarily.
> A final "option" that I'm not calling an option would be to restructure this so it is not passed through as strings, but is passed through as a dict (making the toml version the "true" method and strings just a way to build that dict). But this is a larger architectural change.
Perhaps. Though we'd still have to figure out the tricky problem of how to translate between a dict and a bash string of assignments somewhere e.g. how do we reconcile this:
> @henryiii: I'm just not fond of this working:
>
> ```
> PATH=$PATH:/local/bin
> PATH=/windows loves spaces/bin
> ```
>
> But not this:
> ```
> PATH=$PATH:/windows loves spaces/bin
> ```
So I'm kinda leaning towards reverting to the previous behaviour for now, until we can figure out what the right policy is here. | 2022-09-17T10:22:40 |
pypa/cibuildwheel | 1,282 | pypa__cibuildwheel-1282 | [
"1281"
] | 7969f7c79478a9700bfd462e71e9e0b1fd61a892 | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -173,7 +173,7 @@ def build_in_directory(args: CommandLineArguments) -> None:
platform = "linux"
elif "macosx_" in args.only:
platform = "macos"
- elif "win_" in args.only:
+ elif "win_" in args.only or "win32" in args.only:
platform = "windows"
else:
print(
| diff --git a/unit_test/main_tests/main_platform_test.py b/unit_test/main_tests/main_platform_test.py
--- a/unit_test/main_tests/main_platform_test.py
+++ b/unit_test/main_tests/main_platform_test.py
@@ -199,6 +199,7 @@ def test_archs_platform_all(platform, intercepted_build_args, monkeypatch):
(
("cp311-manylinux_x86_64", "linux"),
("cp310-win_amd64", "windows"),
+ ("cp310-win32", "windows"),
("cp311-macosx_x86_64", "macos"),
),
)
| --only doesn't work for `-win32` identifiers
> - cp311 https://github.com/ddelange/asyncpg/actions/runs/3092263953/jobs/5003341864#step:4:43
> - same for all other win32 builds: `Invalid --only='cp310-win32', must be a build selector with a known platform`
>
> in https://github.com/ddelange/asyncpg/pull/2
>
> and before this change it was fine: https://github.com/ddelange/asyncpg/actions/runs/3072698535/jobs/4964355341#step:4:89
>
> maybe it's an issue with the `--only` flag?
>
> on my mac:
>
> ```console
> $ cibuildwheel --print-build-identifiers --platform windows --arch x86,AMD64
> cp36-win32
> cp36-win_amd64
> cp37-win32
> cp37-win_amd64
> cp38-win32
> cp38-win_amd64
> cp39-win32
> cp39-win_amd64
> cp310-win32
> cp310-win_amd64
> cp311-win32
> cp311-win_amd64
> pp37-win_amd64
> pp38-win_amd64
> pp39-win_amd64
> ```
>
_Originally posted by @ddelange in https://github.com/pypa/cibuildwheel/issues/1266#issuecomment-1252703994_
| Issue seems to be this line:
https://github.com/mayeut/cibuildwheel/blob/6df5de717552b07dfee43760b154f0bf8a1af4ed/cibuildwheel/__main__.py#L176 | 2022-09-24T22:13:35 |
pypa/cibuildwheel | 1,298 | pypa__cibuildwheel-1298 | [
"1187"
] | 7c4bbf8cb31d856a0fe547faf8edf165cd48ce74 | diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -110,6 +110,29 @@ def get_build_steps(
yield from steps.values()
+def check_all_python_exist(
+ *, platform_configs: list[PythonConfiguration], container: OCIContainer
+) -> None:
+ exist = True
+ messages = []
+ for config in platform_configs:
+ python_path = config.path / "bin" / "python"
+ try:
+ container.call(["test", "-x", python_path])
+ except subprocess.CalledProcessError:
+ messages.append(
+ f" '{python_path}' executable doesn't exist in image '{container.image}' to build '{config.identifier}'."
+ )
+ exist = False
+ if not exist:
+ message = "\n".join(messages)
+ print(
+ f"cibuildwheel:\n{message}",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+
def build_in_container(
*,
options: Options,
@@ -120,6 +143,8 @@ def build_in_container(
) -> None:
container_output_dir = PurePosixPath("/output")
+ check_all_python_exist(platform_configs=platform_configs, container=container)
+
log.step("Copying project into container...")
container.copy_into(Path.cwd(), container_project_path)
| diff --git a/test/test_linux_python.py b/test/test_linux_python.py
new file mode 100644
--- /dev/null
+++ b/test/test_linux_python.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import platform
+import subprocess
+
+import pytest
+
+from . import test_projects, utils
+
+
+def test_python_exist(tmp_path, capfd):
+ if utils.platform != "linux":
+ pytest.skip("the test is only relevant to the linux build")
+ machine = platform.machine()
+ if machine not in ["x86_64", "i686"]:
+ pytest.skip(
+ "this test is currently only possible on x86_64/i686 due to availability of alternative images"
+ )
+
+ project_dir = tmp_path / "project"
+ basic_project = test_projects.new_c_project()
+ basic_project.generate(project_dir)
+
+ with pytest.raises(subprocess.CalledProcessError):
+ utils.cibuildwheel_run(
+ project_dir,
+ add_env={
+ "CIBW_MANYLINUX_X86_64_IMAGE": "manylinux2010",
+ "CIBW_MANYLINUX_I686_IMAGE": "manylinux2010",
+ "CIBW_BUILD": "cp3{10,11}-manylinux*",
+ },
+ )
+
+ captured = capfd.readouterr()
+ print("out", captured.out)
+ print("err", captured.err)
+ assert f" to build 'cp310-manylinux_{machine}'." not in captured.err
+ message = (
+ "'/opt/python/cp311-cp311/bin/python' executable doesn't exist"
+ f" in image 'quay.io/pypa/manylinux2010_{machine}:2022-08-05-4535177'"
+ f" to build 'cp311-manylinux_{machine}'."
+ )
+ assert message in captured.err
| 3.11 builds failing with v2.8.1 on manylinux2010 images
### Description
With:
```yaml
cibw_python: [ "cp37-*", "cp38-*", "cp39-*", "cp310-*", "cp311-*"]
```
and
```yaml
env:
CIBW_PRERELEASE_PYTHONS: True
```
I'm seeing this error:
```
Setting up build environment...
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ which python
cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it.
Error: Process completed with exit code 1.
```
This didn't occur with 2.8.0 - I suspect it's somehow related to this changelog entry:
> 🛠 The GitHub Action will ensure a compatible version of Python is installed on the runner (#1114)
I'm also seeing
```
Run actions/setup-python@v4
with:
python-version: 3.7 - 3.10
update-environment: false
token: ***
env:
CIBW_BUILD: cp311-*
CIBW_ARCHS_LINUX: x86_64
    CIBW_MANYLINUX_X86_64_IMAGE: manylinux2010
CIBW_MANYLINUX_I686_IMAGE: manylinux2010
CIBW_PRERELEASE_PYTHONS: true
CIBW_TEST_EXTRAS: test
CIBW_TEST_COMMAND: pytest {package}/tests
```
in the build log - perhaps that "python-version: 3.7 - 3.10" is the problem?
### Build log
https://github.com/bloomberg/memray/runs/7421778954?check_suite_focus=true
### CI config
https://github.com/bloomberg/memray/blob/f9bfaacea95c9c565189da2eeb121a3303c11c71/.github/workflows/build_wheels.yml
| That’s not what that range means. It uses one version from that, and the outer host setting shouldn’t matter. I wish we printed the details of the mismatch. We are using 3.11b4 now. Don’t see how that would change this either, though. Am traveling and can’t check quite yet.
Why is it using `/opt/python/cp38-cp38/bin/python`? Host is 3.10 and target is 3.11. Don’t get where 3.8 is sneaking in.
I'm past my depth in debugging this, but if there's anything I can run to help you make sense of it, I'd be happy to. I've added a `type -a python` to `before-all` and it prints out:
```
python is /opt/python/cp38-cp38/bin/python
python is /usr/bin/python
```
but I'm not sure where the `/opt/python/cp38-cp38/bin/python` is coming from...
Likewise, printing `$PATH` is showing me
```
PATH=/opt/python/cp38-cp38/bin:/opt/rh/devtoolset-8/root/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
```
This could be something I've got misconfigured, but v2.8.1 is failing in a way that v2.8.0 didn't...
manylinux2010 does not have CPython 3.11
support for this image will be dropped before ABI stability of CPython 3.11, thus the beta has been dropped on this image to prevent building 3.11 with a Python that will never be ABI stable on that image.
You'll have to use manylinux2014 image to build CPython 3.11
- https://github.com/pypa/manylinux/issues/1281
- https://github.com/pypa/manylinux/commit/97c4ab0bc6f0d9a44f60e7938b025c69788a89b7
Oof. OK... Well, that's not good news for me, since I do still need RHEL 6 compatibility, but at least that answers what changed.
Thanks for helping me look into this.
Thanks for the debugging. We could use a better error message for this then. @mayeut I'm guessing manylinux1 also won't get Python 3.11?
We need to detect this then, rather than grabbing a random Python. :)
manylinux1 is “dead”, it’s just still being built while CI doesn’t break, so it’s not getting anything new. Not sure you can build 3.11 on CentOS 5.
You can use an override to build 3.11 with manylinux2014. In fact, IIRC, NumPy switched to 2014 for 3.10, since anyone on 3.10 should be on a 2014 compatible platform. Doesn’t help to have higher compatibility than your dependencies.
> I'm guessing manylinux1 also won't get Python 3.11?
Correct. The manylinux1 image does not even have Python 3.10
> We need to detect this then, rather than grabbing a random Python. :)
It's correctly detected. The error message "cibuildwheel: python available on PATH doesn't match our installed instance. If you have modified PATH, ensure that you don't overwrite cibuildwheel's entry or insert python above it." relates to that (could be clearer by checking the path added to PATH exists in the first place).
The `/opt/python/cp38-cp38/bin/python` python mentioned is the one used as a build driver inside the container (counterpart to the host python running cibuildwheel).
Yep, it is detected, it's just the error message is confusing because that message is assuming that the user has fiddled with PATH to break it, not that it never existed in the image.
We can probably add a different message, like `if not python_bin.exists(): raise Exception(f'Python {python_configuration.version} doesn't exist in image {image}')`. And even perhaps a fail-fast before the builds start.
> The `/opt/python/cp38-cp38/bin/python` python mentioned is the one used as a build driver inside the container (counterpart to the host python running cibuildwheel).
I don't recall adding this to PATH in cibuildwheel. Perhaps it's added in manylinux?
> Perhaps it's added in manylinux?
The only `python` in the `PATH` in manylinux images is the system one (when it exists, on some images the system python is `python3`)
I probably wasn't clear enough about `/opt/python/cp38-cp38/bin/python`.
> The `/opt/python/cp38-cp38/bin/python` python mentioned is the one used as a build driver inside the container (counterpart to the host python running cibuildwheel).
It's almost always used by cibuildwheel with the full file path: https://github.com/pypa/cibuildwheel/blob/7c7dda72084bd9203b4fffd3af856560f8e48c64/cibuildwheel/oci_container.py#L46
except for before_all step:
https://github.com/pypa/cibuildwheel/blob/7c7dda72084bd9203b4fffd3af856560f8e48c64/cibuildwheel/linux.py#L128-L137
ahh.. gotcha. So the cp38 in PATH thing really is a red herring. It's only a result of trying to investigate this using before-all. | 2022-10-08T11:04:34 |
pypa/cibuildwheel | 1,311 | pypa__cibuildwheel-1311 | [
"1310"
] | 1aa841bedaa6998f8a3104eae6f9bfa242cceea4 | diff --git a/cibuildwheel/linux.py b/cibuildwheel/linux.py
--- a/cibuildwheel/linux.py
+++ b/cibuildwheel/linux.py
@@ -189,6 +189,8 @@ def build_in_container(
log.step("Setting up build environment...")
env = container.get_environment()
+ env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
+ env["PIP_ROOT_USER_ACTION"] = "ignore"
# put this config's python top of the list
python_bin = config.path / "bin"
| diff --git a/test/test_0_basic.py b/test/test_0_basic.py
--- a/test/test_0_basic.py
+++ b/test/test_0_basic.py
@@ -20,7 +20,7 @@
)
-def test(tmp_path, build_frontend_env):
+def test(tmp_path, build_frontend_env, capfd):
project_dir = tmp_path / "project"
basic_project.generate(project_dir)
@@ -31,6 +31,12 @@ def test(tmp_path, build_frontend_env):
expected_wheels = utils.expected_wheels("spam", "0.1.0")
assert set(actual_wheels) == set(expected_wheels)
+ # Verify pip warning not shown
+ captured = capfd.readouterr()
+ for stream in (captured.err, captured.out):
+ assert "WARNING: Running pip as the 'root' user can result" not in stream
+ assert "A new release of pip available" not in stream
+
@pytest.mark.skip(reason="to keep test output clean")
def test_sample_build(tmp_path, capfd):
| Minor warning: still showing 'root' user warning on Linux
Still seeing this in 2.11.1, even after #1304:
```
Successfully installed mypy_extensions-0.4.3 setuptools-65.4.1 tomli-2.0.1 types-psutil-5.9.5.1 types-setuptools-65.4.0.0 types-typed-ast-1.5.8 typing_extensions-4.4.0 wheel-0.37.1
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
Running command Getting requirements to build wheel
```
Looks like it's missing from an env still?
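(For reference, the fix that landed in the patch above boils down to two pip environment variables; a minimal standalone sketch, assuming a pip recent enough to understand `--root-user-action`:)
```py
import os

env = os.environ.copy()
# silence "Running pip as the 'root' user ..." (the option was added around pip 22.1)
env["PIP_ROOT_USER_ACTION"] = "ignore"
# silence the "A new release of pip available" upgrade notice
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
# any pip invocation run with this environment (e.g. via subprocess) stays quiet
```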
| 2022-10-14T02:30:56 |
|
pypa/cibuildwheel | 1,324 | pypa__cibuildwheel-1324 | [
"1161"
] | b2b703db9d7cb8176f757f4d695cda755d9b8f95 | diff --git a/cibuildwheel/oci_container.py b/cibuildwheel/oci_container.py
--- a/cibuildwheel/oci_container.py
+++ b/cibuildwheel/oci_container.py
@@ -303,9 +303,11 @@ def call(
return_code = int(return_code_str)
# add the last line to output, without the footer
output_io.write(line[0:footer_offset])
+ output_io.flush()
break
else:
output_io.write(line)
+ output_io.flush()
if isinstance(output_io, io.BytesIO):
output = str(output_io.getvalue(), encoding="utf8", errors="surrogateescape")
| Stderr on docker run at the top
Currently, stdout is captured and placed in the error. But stderr is not, so it's displayed inline. `CalledProcessError` does support stderr too, so should we do the same thing with stderr? (Not sure I'm a big fan of separating stderr and stdout, sometimes they might be best in the original order).
Thoughts?
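For context, when both streams are captured, `CalledProcessError` already carries them both; a minimal generic sketch (not the real container plumbing):
```py
import subprocess

try:
    subprocess.run(
        ["sh", "-c", "echo to-stdout; echo to-stderr >&2; exit 1"],
        capture_output=True,
        text=True,
        check=True,
    )
except subprocess.CalledProcessError as err:
    print(err.output)  # 'to-stdout\n'  (what is already attached today)
    print(err.stderr)  # 'to-stderr\n'  (could be attached the same way)
```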
| Here's an example (just the end of the log):
```
...
Building cp39-manylinux_x86_64 wheel
CPython 3.9 manylinux x86_64
Setting up build environment...
+ /opt/python/cp38-cp38/bin/python -c 'import sys, json, os; json.dump(os.environ.copy(), sys.stdout)'
+ which python
+ which pip
✓ 0.04s
Running before_build...
+ sh -c 'pip install cython && cmake -DOPENDHT_PYTHON=ON . && make'
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
Indexation enabled since it is required for Python support
/project/src/dhtrunner.cpp: In member function ‘void dht::DhtRunner::setProxyServer(const string&, const string&)’:
/project/src/dhtrunner.cpp:1080:72: warning: unused parameter ‘pushNodeId’ [-Wunused-parameter]
1080 | DhtRunner::setProxyServer(const std::string& proxy, const std::string& pushNodeId)
| ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~
/project/src/dhtrunner.cpp: In member function ‘void dht::DhtRunner::setProxyServer(const string&, const string&)’:
/project/src/dhtrunner.cpp:1080:72: warning: unused parameter ‘pushNodeId’ [-Wunused-parameter]
1080 | DhtRunner::setProxyServer(const std::string& proxy, const std::string& pushNodeId)
| ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~
Traceback (most recent call last):
File "/project/python/setup.py", line 26, in <module>
setup(name="opendht",
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/__init__.py", line 87, in setup
return distutils.core.setup(**attrs)
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/core.py", line 148, in setup
return run_commands(dist)
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/core.py", line 163, in run_commands
dist.run_commands()
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 967, in run_commands
self.run_command(cmd)
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/dist.py", line 1229, in run_command
super().run_command(command)
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 985, in run_command
cmd_obj.ensure_finalized()
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/cmd.py", line 107, in ensure_finalized
self.finalize_options()
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/command/build.py", line 106, in finalize_options
if self.distribution.has_ext_modules():
File "/opt/python/cp39-cp39/lib/python3.9/site-packages/setuptools/_distutils/dist.py", line 995, in has_ext_modules
return self.ext_modules and len(self.ext_modules) > 0
TypeError: object of type 'Extension' has no len()
make[2]: *** [python/CMakeFiles/python.dir/build.make:72: python/CMakeFiles/python] Error 1
make[1]: *** [CMakeFiles/Makefile2:300: python/CMakeFiles/python.dir/all] Error 2
make: *** [Makefile:136: all] Error 2
Collecting cython
Downloading Cython-0.29.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl (2.0 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.0/2.0 MB 19.2 MB/s eta 0:00:00
Installing collected packages: cython
Successfully installed cython-0.29.30
-- The C compiler identification is GNU 11.2.1
-- The CXX compiler identification is GNU 11.2.1
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Check for working C compiler: /opt/rh/gcc-toolset-11/root/usr/bin/cc - skipped
-- Detecting C compile features
-- Detecting C compile features - done
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Check for working CXX compiler: /opt/rh/gcc-toolset-11/root/usr/bin/c++ - skipped
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Found PkgConfig: /usr/bin/pkg-config (found version "1.4.2")
-- Performing Test HAVE_CXX_ATOMICS_WITHOUT_LIB
-- Performing Test HAVE_CXX_ATOMICS_WITHOUT_LIB - Success
-- Performing Test HAVE_CXX_ATOMICS64_WITHOUT_LIB
-- Performing Test HAVE_CXX_ATOMICS64_WITHOUT_LIB - Success
-- Performing Test LLVM_HAS_ATOMICS
-- Performing Test LLVM_HAS_ATOMICS - Success
-- Could NOT find Doxygen (missing: DOXYGEN_EXECUTABLE)
-- Looking for pthread.h
-- Looking for pthread.h - found
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD
-- Performing Test CMAKE_HAVE_LIBC_PTHREAD - Failed
-- Check if compiler accepts -pthread
-- Check if compiler accepts -pthread - yes
-- Found Threads: TRUE
-- Found GnuTLS: /usr/lib64/libgnutls.so (found suitable version "3.6.16", minimum required is "3.3")
-- Checking for one of the modules 'nettle'
-- Found Readline: /usr/lib64/libreadline.so (Required is at least version "6")
-- Checking for one of the modules 'libargon2'
-- Checking for one of the modules 'jsoncpp'
-- Configuring done
-- Generating done
-- Build files have been written to: /project
[ 2%] Building CXX object CMakeFiles/opendht-static.dir/src/utils.cpp.o
[ 4%] Building CXX object CMakeFiles/opendht-static.dir/src/infohash.cpp.o
[ 6%] Building CXX object CMakeFiles/opendht-static.dir/src/crypto.cpp.o
[ 8%] Building CXX object CMakeFiles/opendht-static.dir/src/default_types.cpp.o
[ 10%] Building CXX object CMakeFiles/opendht-static.dir/src/node.cpp.o
[ 12%] Building CXX object CMakeFiles/opendht-static.dir/src/value.cpp.o
[ 14%] Building CXX object CMakeFiles/opendht-static.dir/src/dht.cpp.o
[ 16%] Building CXX object CMakeFiles/opendht-static.dir/src/op_cache.cpp.o
[ 18%] Building CXX object CMakeFiles/opendht-static.dir/src/callbacks.cpp.o
[ 20%] Building CXX object CMakeFiles/opendht-static.dir/src/routing_table.cpp.o
[ 22%] Building CXX object CMakeFiles/opendht-static.dir/src/node_cache.cpp.o
[ 25%] Building CXX object CMakeFiles/opendht-static.dir/src/network_engine.cpp.o
[ 27%] Building CXX object CMakeFiles/opendht-static.dir/src/securedht.cpp.o
[ 29%] Building CXX object CMakeFiles/opendht-static.dir/src/dhtrunner.cpp.o
[ 31%] Building CXX object CMakeFiles/opendht-static.dir/src/log.cpp.o
[ 33%] Building CXX object CMakeFiles/opendht-static.dir/src/network_utils.cpp.o
[ 35%] Building CXX object CMakeFiles/opendht-static.dir/src/thread_pool.cpp.o
[ 37%] Building CXX object CMakeFiles/opendht-static.dir/src/peer_discovery.cpp.o
[ 39%] Building CXX object CMakeFiles/opendht-static.dir/src/indexation/pht.cpp.o
[ 41%] Linking CXX static library libopendht.a
[ 41%] Built target opendht-static
[ 43%] Building CXX object CMakeFiles/opendht.dir/src/utils.cpp.o
[ 45%] Building CXX object CMakeFiles/opendht.dir/src/infohash.cpp.o
[ 47%] Building CXX object CMakeFiles/opendht.dir/src/crypto.cpp.o
[ 50%] Building CXX object CMakeFiles/opendht.dir/src/default_types.cpp.o
[ 52%] Building CXX object CMakeFiles/opendht.dir/src/node.cpp.o
[ 54%] Building CXX object CMakeFiles/opendht.dir/src/value.cpp.o
[ 56%] Building CXX object CMakeFiles/opendht.dir/src/dht.cpp.o
[ 58%] Building CXX object CMakeFiles/opendht.dir/src/op_cache.cpp.o
[ 60%] Building CXX object CMakeFiles/opendht.dir/src/callbacks.cpp.o
[ 62%] Building CXX object CMakeFiles/opendht.dir/src/routing_table.cpp.o
[ 64%] Building CXX object CMakeFiles/opendht.dir/src/node_cache.cpp.o
[ 66%] Building CXX object CMakeFiles/opendht.dir/src/network_engine.cpp.o
[ 68%] Building CXX object CMakeFiles/opendht.dir/src/securedht.cpp.o
[ 70%] Building CXX object CMakeFiles/opendht.dir/src/dhtrunner.cpp.o
[ 72%] Building CXX object CMakeFiles/opendht.dir/src/log.cpp.o
[ 75%] Building CXX object CMakeFiles/opendht.dir/src/network_utils.cpp.o
[ 77%] Building CXX object CMakeFiles/opendht.dir/src/thread_pool.cpp.o
[ 79%] Building CXX object CMakeFiles/opendht.dir/src/peer_discovery.cpp.o
[ 81%] Building CXX object CMakeFiles/opendht.dir/src/indexation/pht.cpp.o
[ 83%] Linking CXX shared library libopendht.so
[ 83%] Built target opendht
[ 85%] Building CXX object tools/CMakeFiles/dhtnode.dir/dhtnode.cpp.o
[ 87%] Linking CXX executable dhtnode
[ 87%] Built target dhtnode
[ 89%] Building CXX object tools/CMakeFiles/dhtscanner.dir/dhtscanner.cpp.o
[ 91%] Linking CXX executable dhtscanner
[ 91%] Built target dhtscanner
[ 93%] Building CXX object tools/CMakeFiles/dhtchat.dir/dhtchat.cpp.o
[ 95%] Linking CXX executable dhtchat
[ 95%] Built target dhtchat
[ 97%] Building CXX object tools/CMakeFiles/perftest.dir/perftest.cpp.o
[100%] Linking CXX executable perftest
[100%] Built target perftest
running build
✕ 163.52s
```
Hm. Yeah. I have recreated this locally, here is the MWE:
```bash
python -m test.test_projects test.test_0_basic.basic_project /tmp/basic_project
(cd /tmp/basic_project && CIBW_BEFORE_BUILD="python -uc 'import sys; print(1, file=sys.stdout); print(2, file=sys.stderr)'" cibuildwheel --platform linux)
```
In the printout, you see:
```
Running before_build...
+ sh -c 'python -uc '"'"'import sys; print(1, file=sys.stdout); print(2, file=sys.stderr)'"'"''
2
1
```
Whereas running that command locally:
```bash
sh -c 'python -uc '"'"'import sys; print(1, file=sys.stdout); print(2, file=sys.stderr)'"'"''
1
2
```
I'm not sure why we see this behaviour, tbh. We _do_ line-buffer stdout (while stderr passes through untouched), but even with a 1 second delay between the print statements the behaviour is the same. So it's not about how fast we do it. Line-buffering shouldn't wait for the end of the command before printing the output, but it looks like that's what is happening... | 2022-10-21T14:10:28 |
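(For the record, the change in the patch above takes the simple route of flushing after every line written through; roughly this shape:)
```py
import sys

def relay(lines, output_io=sys.stdout.buffer):
    # pass each captured line straight through and flush immediately, so it
    # shows up as it's produced instead of all at once when the command ends
    for line in lines:
        output_io.write(line)
        output_io.flush()

relay([b"1\n", b"2\n"])
```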
|
pypa/cibuildwheel | 1,372 | pypa__cibuildwheel-1372 | [
"1369"
] | 06c4927798c6cc67792db86e6e133df4ac0a7139 | diff --git a/bin/update_pythons.py b/bin/update_pythons.py
--- a/bin/update_pythons.py
+++ b/bin/update_pythons.py
@@ -159,8 +159,8 @@ def update_version_windows(self, spec: Specifier) -> ConfigWinCP:
)
def update_version_macos(self, spec: Specifier) -> ConfigMacOS:
- if self.arch != "64":
- msg = "Other archs not supported yet on macOS"
+ if self.arch not in {"64", "ARM64"}:
+ msg = f"'{self.arch}' arch not supported yet on macOS"
raise RuntimeError(msg)
releases = [r for r in self.releases if spec.contains(r["python_version"])]
@@ -172,12 +172,14 @@ def update_version_macos(self, spec: Specifier) -> ConfigMacOS:
release = releases[-1]
version = release["python_version"]
- identifier = f"pp{version.major}{version.minor}-macosx_x86_64"
+ arch = "x86_64" if self.arch == "64" else self.arch.lower()
+ identifier = f"pp{version.major}{version.minor}-macosx_{arch}"
+ arch = "x64" if self.arch == "64" else self.arch.lower()
(url,) = (
rf["download_url"]
for rf in release["files"]
- if "" in rf["platform"] == "darwin" and rf["arch"] == "x64"
+ if "" in rf["platform"] == "darwin" and rf["arch"] == arch
)
return ConfigMacOS(
@@ -251,6 +253,7 @@ def __init__(self) -> None:
self.macos_cpython = CPythonVersions()
self.macos_pypy = PyPyVersions("64")
+ self.macos_pypy_arm64 = PyPyVersions("ARM64")
def update_config(self, config: dict[str, str]) -> None:
identifier = config["identifier"]
@@ -261,11 +264,14 @@ def update_config(self, config: dict[str, str]) -> None:
config_update: AnyConfig | None = None
# We need to use ** in update due to MyPy (probably a bug)
- if "macos" in identifier:
+ if "macosx" in identifier:
if identifier.startswith("cp"):
config_update = self.macos_cpython.update_version_macos(identifier, version, spec)
elif identifier.startswith("pp"):
- config_update = self.macos_pypy.update_version_macos(spec)
+ if "macosx_x86_64" in identifier:
+ config_update = self.macos_pypy.update_version_macos(spec)
+ elif "macosx_arm64" in identifier:
+ config_update = self.macos_pypy_arm64.update_version_macos(spec)
elif "win32" in identifier:
if identifier.startswith("cp"):
config_update = self.windows_32.update_version_windows(spec)
diff --git a/cibuildwheel/logger.py b/cibuildwheel/logger.py
--- a/cibuildwheel/logger.py
+++ b/cibuildwheel/logger.py
@@ -126,6 +126,10 @@ def step_end_with_error(self, error: BaseException | str) -> None:
self.step_end(success=False)
self.error(error)
+ def quiet(self, message: str) -> None:
+ c = self.colors
+ print(f"{c.gray}{message}{c.end}", file=sys.stderr)
+
def notice(self, message: str) -> None:
if self.fold_mode == "github":
print(f"::notice::{message}\n", file=sys.stderr)
diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -84,7 +84,31 @@ def get_python_configurations(
]
# skip builds as required by BUILD/SKIP
- return [c for c in python_configurations if build_selector(c.identifier)]
+ python_configurations = [c for c in python_configurations if build_selector(c.identifier)]
+
+ # filter-out some cross-compilation configs with PyPy:
+ # can't build arm64 on x86_64
+ # rosetta allows to build x86_64 on arm64
+ if platform.machine() == "x86_64":
+ python_configurations_before = set(python_configurations)
+ python_configurations = [
+ c
+ for c in python_configurations
+ if not (c.identifier.startswith("pp") and c.identifier.endswith("arm64"))
+ ]
+ removed_elements = python_configurations_before - set(python_configurations)
+ if removed_elements:
+ ids = ", ".join(c.identifier for c in removed_elements)
+ log.quiet(
+ unwrap(
+ f"""
+ Note: {ids} {'was' if len(removed_elements) == 1 else 'were'}
+ selected, but can't be built on x86_64 so will be skipped automatically.
+ """
+ )
+ )
+
+ return python_configurations
def install_cpython(tmp: Path, version: str, url: str) -> Path:
| diff --git a/test/test_macos_archs.py b/test/test_macos_archs.py
--- a/test/test_macos_archs.py
+++ b/test/test_macos_archs.py
@@ -65,7 +65,7 @@ def test_cross_compiled_test(tmp_path, capfd, build_universal2):
actual_wheels = utils.cibuildwheel_run(
project_dir,
add_env={
- "CIBW_BUILD": "cp39-*",
+ "CIBW_BUILD": "cp39-*" if build_universal2 else "*p39-*",
"CIBW_TEST_COMMAND": '''python -c "import platform; print('running tests on ' + platform.machine())"''',
"CIBW_ARCHS": "universal2" if build_universal2 else "x86_64 arm64",
"CIBW_BUILD_VERBOSITY": "3",
@@ -76,7 +76,8 @@ def test_cross_compiled_test(tmp_path, capfd, build_universal2):
assert DEPLOYMENT_TARGET_TOO_LOW_WARNING not in captured.err
- if platform.machine() == "x86_64":
+ platform_machine = platform.machine()
+ if platform_machine == "x86_64":
# ensure that tests were run on only x86_64
assert "running tests on x86_64" in captured.out
assert "running tests on arm64" not in captured.out
@@ -89,15 +90,24 @@ def test_cross_compiled_test(tmp_path, capfd, build_universal2):
assert (
"While arm64 wheels can be built on x86_64, they cannot be tested" in captured.err
)
- elif platform.machine() == "arm64":
+ elif platform_machine == "arm64":
# ensure that tests were run on both x86_64 and arm64
assert "running tests on x86_64" in captured.out
assert "running tests on arm64" in captured.out
+ assert (
+ "While universal2 wheels can be built on x86_64, the arm64 part of them cannot currently be tested"
+ not in captured.err
+ )
+ assert (
+ "While arm64 wheels can be built on x86_64, they cannot be tested" not in captured.err
+ )
if build_universal2:
expected_wheels = [w for w in ALL_MACOS_WHEELS if "cp39" in w and "universal2" in w]
else:
- expected_wheels = [w for w in ALL_MACOS_WHEELS if "cp39" in w and "universal2" not in w]
+ expected_wheels = [w for w in ALL_MACOS_WHEELS if "p39-" in w and "universal2" not in w]
+ if platform_machine == "x86_64":
+ expected_wheels = [w for w in expected_wheels if not ("pp39" in w and "arm64" in w)]
assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -177,7 +177,14 @@ def expected_wheels(
if platform == "macos" and machine_arch == "arm64":
# arm64 macs are only supported by cp38+
- python_abi_tags = ["cp38-cp38", "cp39-cp39", "cp310-cp310", "cp311-cp311"]
+ python_abi_tags = [
+ "cp38-cp38",
+ "cp39-cp39",
+ "cp310-cp310",
+ "cp311-cp311",
+ "pp38-pypy38_pp73",
+ "pp39-pypy39_pp73",
+ ]
wheels = []
| Update PyPy versions, add macos arm64 PyPy
### Description
PyPy has released version 7.3.10, which includes macOS arm64 versions. This release only has Python 3.8 and 3.9.
### Build log
_No response_
### CI config
_No response_
| Also CPython 3.11.1 is out - would make a nice update & release. :)
FYI, I'm on vacation this week so if it's up to me, likely will be early next week. Maybe someone else will be faster. :)
Update available in #1371
I'll prepare a PR with macos arm64 PyPy | 2022-12-11T10:45:24 |
pypa/cibuildwheel | 1,507 | pypa__cibuildwheel-1507 | [
"1505",
"1502"
] | 301dca264f3e41b1f0d07e13019fccfaaaae6273 | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -251,7 +251,7 @@ class BuildSelector:
requires_python: SpecifierSet | None = None
# a pattern that skips prerelease versions, when include_prereleases is False.
- PRERELEASE_SKIP: ClassVar[str] = ""
+ PRERELEASE_SKIP: ClassVar[str] = "cp312-*"
prerelease_pythons: bool = False
def __call__(self, build_id: str) -> bool:
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -9,7 +9,7 @@
nox.options.sessions = ["lint", "pylint", "check_manifest", "tests"]
-PYTHON_ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"]
+PYTHON_ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
DIR = Path(__file__).parent.resolve()
| diff --git a/test/test_abi_variants.py b/test/test_abi_variants.py
--- a/test/test_abi_variants.py
+++ b/test/test_abi_variants.py
@@ -47,7 +47,7 @@ def test_abi3(tmp_path):
expected_wheels = [
w.replace("cp38-cp38", "cp38-abi3")
for w in utils.expected_wheels("spam", "0.1.0")
- if "-pp" not in w and "-cp39" not in w and "-cp310" not in w and "-cp311" not in w
+ if "-pp" not in w and "-cp39" not in w and "-cp31" not in w
]
assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/test_dependency_versions.py b/test/test_dependency_versions.py
--- a/test/test_dependency_versions.py
+++ b/test/test_dependency_versions.py
@@ -120,10 +120,10 @@ def test_dependency_constraints_file(tmp_path, build_frontend_env):
project_with_expected_version_checks.generate(project_dir)
tool_versions = {
- "pip": "20.0.2",
- "setuptools": "53.0.0",
- "wheel": "0.36.2",
- "virtualenv": "20.11.2",
+ "pip": "23.1.2",
+ "setuptools": "67.7.2",
+ "wheel": "0.38.3",
+ "virtualenv": "20.23.0",
}
constraints_file = tmp_path / "constraints file.txt"
@@ -155,11 +155,12 @@ def test_dependency_constraints_file(tmp_path, build_frontend_env):
add_env={
"CIBW_ENVIRONMENT": cibw_environment_option,
"CIBW_DEPENDENCY_VERSIONS": str(constraints_file),
+ "CIBW_SKIP": "cp36-*",
**build_frontend_env,
},
)
# also check that we got the right wheels
- expected_wheels = utils.expected_wheels("spam", "0.1.0")
+ expected_wheels = [w for w in utils.expected_wheels("spam", "0.1.0") if "-cp36" not in w]
assert set(actual_wheels) == set(expected_wheels)
diff --git a/test/test_manylinuxXXXX_only.py b/test/test_manylinuxXXXX_only.py
--- a/test/test_manylinuxXXXX_only.py
+++ b/test/test_manylinuxXXXX_only.py
@@ -84,8 +84,11 @@ def test(manylinux_image, tmp_path):
# We don't have a manylinux1 image for PyPy & CPython 3.10 and above
add_env["CIBW_SKIP"] = "pp* cp31*"
if manylinux_image in {"manylinux2010"}:
- # We don't have a manylinux2010 image for PyPy 3.9, CPython 3.11
- add_env["CIBW_SKIP"] = "pp39* cp311*"
+ # We don't have a manylinux2010 image for PyPy 3.9, CPython 3.11+
+ add_env["CIBW_SKIP"] = "pp39* cp311* cp312*"
+ if manylinux_image in {"manylinux_2_24"}:
+ # We don't have a manylinux_2_24 image for CPython 3.12+
+ add_env["CIBW_SKIP"] = "cp312*"
if manylinux_image == "manylinux_2_28" and platform.machine() == "x86_64":
# We don't have a manylinux_2_28 image for i686
add_env["CIBW_ARCHS"] = "x86_64"
@@ -109,7 +112,15 @@ def test(manylinux_image, tmp_path):
if manylinux_image in {"manylinux2010"}:
# remove PyPy 3.9 & CPython 3.11
- expected_wheels = [w for w in expected_wheels if "-pp39" not in w and "-cp311" not in w]
+ expected_wheels = [
+ w
+ for w in expected_wheels
+ if "-pp39" not in w and "-cp311" not in w and "-cp312" not in w
+ ]
+
+ if manylinux_image in {"manylinux_2_24"}:
+ # remove CPython 3.11 and above
+ expected_wheels = [w for w in expected_wheels if "-cp312" not in w]
if manylinux_image == "manylinux_2_28" and platform.machine() == "x86_64":
# We don't have a manylinux_2_28 image for i686
diff --git a/test/test_pep518.py b/test/test_pep518.py
--- a/test/test_pep518.py
+++ b/test/test_pep518.py
@@ -9,7 +9,7 @@
"""
# Will fail if PEP 518 does work
import requests
- assert requests.__version__ == "2.23.0", "Requests found but wrong version ({0})".format(requests.__version__)
+ assert requests.__version__ == "2.27.0", "Requests found but wrong version ({0})".format(requests.__version__)
# Just making sure environment is still set
import os
@@ -27,7 +27,7 @@
"setuptools >= 42",
"setuptools_scm[toml]>=4.1.2",
"wheel",
- "requests==2.23.0"
+ "requests==2.27.0"
]
build-backend = "setuptools.build_meta"
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -170,6 +170,7 @@ def expected_wheels(
"cp39-cp39",
"cp310-cp310",
"cp311-cp311",
+ "cp312-cp312",
]
if machine_arch in ["x86_64", "AMD64", "x86", "aarch64"]:
@@ -182,6 +183,7 @@ def expected_wheels(
"cp39-cp39",
"cp310-cp310",
"cp311-cp311",
+ "cp312-cp312",
"pp38-pypy38_pp73",
"pp39-pypy39_pp73",
]
diff --git a/unit_test/build_selector_test.py b/unit_test/build_selector_test.py
--- a/unit_test/build_selector_test.py
+++ b/unit_test/build_selector_test.py
@@ -12,6 +12,7 @@ def test_build():
assert build_selector("cp37-manylinux_x86_64")
assert build_selector("cp310-manylinux_x86_64")
assert build_selector("cp311-manylinux_x86_64")
+ assert not build_selector("cp312-manylinux_x86_64")
assert build_selector("pp36-manylinux_x86_64")
assert build_selector("pp37-manylinux_x86_64")
assert build_selector("cp36-manylinux_i686")
@@ -31,6 +32,7 @@ def test_build():
assert build_selector("cp37-win_amd64")
assert build_selector("cp310-win_amd64")
assert build_selector("cp311-win_amd64")
+ assert not build_selector("cp312-win_amd64")
assert not build_selector("pp36-win_amd64")
assert not build_selector("pp37-win_amd64")
@@ -43,9 +45,9 @@ def test_build_filter_pre():
)
assert build_selector("cp37-manylinux_x86_64")
- assert build_selector("cp311-manylinux_x86_64")
+ assert build_selector("cp312-manylinux_x86_64")
assert build_selector("cp37-win_amd64")
- assert build_selector("cp311-win_amd64")
+ assert build_selector("cp312-win_amd64")
def test_skip():
| Python 3.12?
### Description
Now that CPython 3.12.0b1 is out, I would like to produce wheels for it. I know the cibuildwheel docs say to wait for rc1, but the advice from Brett Cannon and Thomas Wouters is that it is fine to ship wheels now as long as I produce new wheels in the unlikely event that the ABI changes.
I [tried specifying 3.12, but it failed](https://github.com/nedbat/coveragepy/actions/runs/5073139235/jobs/9111774802#step:6:72)
```
cibuildwheel: No build identifiers selected: BuildSelector(build_config='cp312-*', skip_config='', requires_python=<SpecifierSet('>=3.7')>, prerelease_pythons=True)
```
### Build log
https://github.com/nedbat/coveragepy/actions/runs/5073139235/jobs/9111774802#step:6:72
### CI config
https://github.com/nedbat/coveragepy/blob/nedbat/312-kits/.github/workflows/kit.yml
[Bot] Update dependencies
Update the versions of our dependencies.
PR generated by "Update dependencies" [workflow](https://github.com/pypa/cibuildwheel/actions/runs/5082190344).
| (Pasted from the pypa discord, where I was replying to something related, but the contents are roughly what's of interest here.) Also I could have sworn that I responded that we have to wait till https://github.com/pypa/manylinux/pull/1483, which got merged yesterday. I forget the build frequency, but it's either weekly or daily.
----
manylinux has [roughly the same warning](https://github.com/pypa/manylinux/issues/1484). One key difference is that a wheel built with an ABI difference will likely segfault, compared to a pure-Python wheel, which will produce a readable error. And the first 3.x binary is very important for NumPy, which will forever use that wheel to build all 3.12 wheels in the future (okay, it could possibly be moved to newer patch releases, but never a newer minor version, and even going up a patch version has to be extensively tested in `oldest-supported-numpy`). So NumPy is likely the last possible candidate (though I'm with some of the NumPy team this week, so I could ask to see what they think).
I think we need a guarantee that binary compatibility will be preserved before we can recommend building and uploading support for Python-beta wheels. Or a tag to indicate that wheels were made with a beta (<4) Python. If a project tests cross-beta release compatibility (that is, downloads the wheels again and tests them against the new betas) and immediately deletes them if there's an issue, it's probably fine. But then if you are doing that you probably know what you are doing and don't need advice.
It also takes some time for projects to test, so even for the projects that want to upload beta wheels, I'd probably at the minimum wait till beta 2. I think there's a ton of movement right before beta 1, so I'd not really consider relying on beta 1's stability to be safe.
Maybe I don't understand some of the factors here (likely!)... I'm not counting on the ABI being stable. I'm gambling that it is likely to be stable, and accepting that if it changes, I will need to make new 3.12 wheels. This is the advice from the Steering Council.
I don't understand why you are mentioning NumPy in your answer. If they have unusual requirements, can't they decide their own path?
I get it that building wheels on beta releases can involve complications that wheels on stable releases don't have. I'm asking for the ability to make those wheels and navigate those complications myself.
The short answer is that it'll be available under the CIBW_PRERELEASE_PYTHONS flag, probably within the next few days.
Apologies, that made less sense out of context than I expected it to. The discussion was about the steering council recommendation clashing with cibuildwheel and manylinux's warning, which state pretty much the opposite.
> I'm gambling that it is likely to be stable, and accepting that if it changes, I will need to make new 3.12 wheels.
You can't upload a new Python 3.12 wheel, you only get to upload it once per release. You can delete the broken wheel and (ideally) make a new release with the updated 3.12 wheel, but many projects don't make releases based on fixing 3.12 wheels. If you know what to do, it's fine to do it, I was just defending the cibuildwheel/manylinux stance of making this available but with a warning and a custom flag, and this issue was a handy place to dump the text in a (more) public place. It absolutely will be available soon, and you absolutely can upload if you know what you are doing.
Just checked, the latest manylinux has `Python 3.12.0b1`!
Great, thanks for clearing that up.
> Just checked, the latest manylinux has `Python 3.12.0b1`!
I guess we need a new release of cibuildwheel for it to work?
> You can't upload a new Python 3.12 wheel, you only get to upload it once per release.
It's possible to upload a new wheel for a release via build numbers.
For example, we did it not once but twice in https://github.com/python-pillow/Pillow/issues/6750
https://pypi.org/project/Pillow/9.4.0/#files has:
* `Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl`
* `Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl`
* `Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl`
That allows you to only upload one new wheel rather than the full set, right? I think I assumed it was the full set. That would help a lot (as long as you tracked failures and actually rebuilt wheels if there was one - and `wheel tags` would help).
I would like to know how common ABI breakages during betas are.
> That allows you to only upload one new wheel rather than the full set, right?
Yes, you can do it for as many or as few as you need. In this Pillow case, we specifically re-uploaded the x86-64 macOS wheels, as only those were broken.
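As a side note on the filename format: the build number is the optional extra field between the version and the Python tag, and it can be inspected programmatically (sketch below assumes the third-party `packaging` library):
```python
# Sketch: reading the optional build-number field of a wheel filename,
# assuming the third-party `packaging` library is installed.
from packaging.utils import parse_wheel_filename

name, version, build, tags = parse_wheel_filename(
    "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl"
)
print(build)  # (2, '') - the build tag that distinguishes a re-uploaded wheel
```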
| 2023-05-26T06:15:58 |
pypa/cibuildwheel | 1,564 | pypa__cibuildwheel-1564 | [
"1560"
] | 36049d86a2e21d74382d84d7a423e20e49a2ca91 | diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -450,6 +450,7 @@ def globals(self) -> GlobalOptions:
build_config = args.only
skip_config = ""
architectures = Architecture.all_archs(self.platform)
+ prerelease_pythons = True
build_selector = BuildSelector(
build_config=build_config,
| diff --git a/unit_test/main_tests/main_platform_test.py b/unit_test/main_tests/main_platform_test.py
--- a/unit_test/main_tests/main_platform_test.py
+++ b/unit_test/main_tests/main_platform_test.py
@@ -214,6 +214,7 @@ def test_only_argument(intercepted_build_args, monkeypatch, only, plat):
assert options.globals.build_selector.skip_config == ""
assert options.platform == plat
assert options.globals.architectures == Architecture.all_archs(plat)
+ assert options.globals.build_selector.prerelease_pythons is True
@pytest.mark.parametrize("only", ("cp311-manylxinux_x86_64", "some_linux_thing"))
| --only shouldn't require CIBW_PRERELEASE_PYTHONS
### Description
Using `--only` to target a pre-release Python shouldn't require `CIBW_PRERELEASE_PYTHONS` be set.
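A minimal sketch of the expected behaviour, assuming `BuildSelector` is importable from `cibuildwheel.util`:
```python
# With --only, the selector is constructed with prerelease_pythons=True (see the
# patch above), so a pre-release identifier matches without CIBW_PRERELEASE_PYTHONS.
from cibuildwheel.util import BuildSelector

selector = BuildSelector(
    build_config="cp312-manylinux_x86_64",
    skip_config="",
    prerelease_pythons=True,
)
assert selector("cp312-manylinux_x86_64")
```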
### Build log
_No response_
### CI config
_No response_
| Agreed! | 2023-08-07T16:16:26 |
pypa/cibuildwheel | 1,613 | pypa__cibuildwheel-1613 | [
"1611"
] | e5d0cc0b0d37d20a053b85112e4e1f137079c85e | diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -8,6 +8,43 @@
from ._compat import tomllib
+def get_parent(node: ast.AST | None, depth: int = 1) -> ast.AST | None:
+ for _ in range(depth):
+ node = getattr(node, "parent", None)
+ return node
+
+
+def is_main(parent: ast.AST | None) -> bool:
+ if parent is None:
+ return False
+
+ # This would be much nicer with 3.10's pattern matching!
+ if not isinstance(parent, ast.If):
+ return False
+ if not isinstance(parent.test, ast.Compare):
+ return False
+
+ try:
+ (op,) = parent.test.ops
+ (comp,) = parent.test.comparators
+ except ValueError:
+ return False
+
+ if not isinstance(op, ast.Eq):
+ return False
+
+ values = {comp, parent.test.left}
+
+ mains = {x for x in values if isinstance(x, ast.Constant) and x.value == "__main__"}
+ if len(mains) != 1:
+ return False
+ consts = {x for x in values if isinstance(x, ast.Name) and x.id == "__name__"}
+ if len(consts) != 1:
+ return False
+
+ return True
+
+
class Analyzer(ast.NodeVisitor):
def __init__(self) -> None:
self.requires_python: str | None = None
@@ -19,13 +56,22 @@ def visit(self, node: ast.AST) -> None:
super().visit(node)
def visit_keyword(self, node: ast.keyword) -> None:
+ # Must not be nested except for if __name__ == "__main__"
+
self.generic_visit(node)
- # Must not be nested in an if or other structure
# This will be Module -> Expr -> Call -> keyword
+ parent = get_parent(node, 4)
+ unnested = parent is None
+
+ # This will be Module -> If -> Expr -> Call -> keyword
+ name_main_unnested = (
+ parent is not None and get_parent(parent) is None and is_main(get_parent(node, 3))
+ )
+
if (
node.arg == "python_requires"
- and not hasattr(node.parent.parent.parent, "parent") # type: ignore[attr-defined]
and isinstance(node.value, ast.Constant)
+ and (unnested or name_main_unnested)
):
self.requires_python = node.value.value
| diff --git a/unit_test/projectfiles_test.py b/unit_test/projectfiles_test.py
--- a/unit_test/projectfiles_test.py
+++ b/unit_test/projectfiles_test.py
@@ -26,6 +26,72 @@ def test_read_setup_py_simple(tmp_path):
assert get_requires_python_str(tmp_path) == "1.23"
+def test_read_setup_py_if_main(tmp_path):
+ with open(tmp_path / "setup.py", "w") as f:
+ f.write(
+ dedent(
+ """
+ from setuptools import setup
+
+ if __name__ == "__main__":
+ setup(
+ name = "hello",
+ other = 23,
+ example = ["item", "other"],
+ python_requires = "1.23",
+ )
+ """
+ )
+ )
+
+ assert setup_py_python_requires(tmp_path.joinpath("setup.py").read_text()) == "1.23"
+ assert get_requires_python_str(tmp_path) == "1.23"
+
+
+def test_read_setup_py_if_main_reversed(tmp_path):
+ with open(tmp_path / "setup.py", "w") as f:
+ f.write(
+ dedent(
+ """
+ from setuptools import setup
+
+ if "__main__" == __name__:
+ setup(
+ name = "hello",
+ other = 23,
+ example = ["item", "other"],
+ python_requires = "1.23",
+ )
+ """
+ )
+ )
+
+ assert setup_py_python_requires(tmp_path.joinpath("setup.py").read_text()) == "1.23"
+ assert get_requires_python_str(tmp_path) == "1.23"
+
+
+def test_read_setup_py_if_invalid(tmp_path):
+ with open(tmp_path / "setup.py", "w") as f:
+ f.write(
+ dedent(
+ """
+ from setuptools import setup
+
+ if True:
+ setup(
+ name = "hello",
+ other = 23,
+ example = ["item", "other"],
+ python_requires = "1.23",
+ )
+ """
+ )
+ )
+
+ assert not setup_py_python_requires(tmp_path.joinpath("setup.py").read_text())
+ assert not get_requires_python_str(tmp_path)
+
+
def test_read_setup_py_full(tmp_path):
with open(tmp_path / "setup.py", "w", encoding="utf8") as f:
f.write(
| setup.py setup() not detected in __name__ == '__main__' block
### Description
My setup.py setup() includes:
python_requires=">=3.8"
However cibuildwheel still tries and fails to compile under Python 3.6.
I understand there is [CIBW_BUILD / CIBW_SKIP](https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip) but that is then duplicating the python requires information.
I can add a \[project\] section to pyproject.toml but that leads to a lot of problems because it ends up fighting with setup() parameters and they **really** don't like it.
I believe cibuildwheel should establish the Python version support automatically whether it comes from setuptools or pyproject.toml, and not try to build on unsupported versions. My [pyproject.toml](https://github.com/rogerbinns/apsw/blob/master/pyproject.toml) is:
````
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
````
### Build log
https://github.com/rogerbinns/apsw/actions/runs/6175182758/job/16761477543
### CI config
https://github.com/rogerbinns/apsw/actions/runs/6175182758/workflow
| This requires parsing the AST and trying to figure out this setting. Cibuildwheel actually does exactly this (though some people dislike it). You have a non-standard `if __name__ == "__main__"` wrapping your setup call, which I assume is what is confusing cibuildwheel's static investigation of your setup.py.
What I'd highly recommend is moving as many of these keyword arguments as possible out of your setup.py's `setup()` call and putting them in `setup.cfg`. (Or pyproject.toml, but it sounded like you didn't want to do that; it makes a bigger difference in defaults and it also requires setuptools >=61, or better >=62). I'd also try to use setuptools' mechanisms for reading the README, etc. - the less custom code you have in setup.py, the better.
Of course, the simplest fix would be to remove the if statement.
PS: setuptools has deprecated running tests with setup.py, so you could at least drop your test command code.
I'll leave out the name == main bit and see what happens. Just found that CIBW_PROJECT_REQUIRES_PYTHON doc says this should work ..
Again, I'd _highly_ recommend moving all the static config to setup.cfg (remove from setup call, add to setup.cfg). This makes parsing it much easier for tooling like cibuildwheel. But if you remove the if statement, it should work, and it's probably a cibuildwheel bug if it doesn't (parsing arbitrary AST is hard).
Removing the if name == main did make cibuildwheel pick up the python_requires bit. It is going to cause me other problems, because some of my other tooling uses functions from setup.py and is going to require some ugly monkey patching to prevent setup() from running. It would be really nice if cibuildwheel's AST parsing allowed the name == main if statement.
The test command code is not there for setuptools use, as in that is not the usual way tests are run. It is there as an arbitrary command to help with test configuration. For example, a build option can say that a particular SQLite extension should be enabled, and when the test runs from the same process it can verify that extension is actually present. When the test suite is run separately it auto-detects which extensions are present, but if there is a bug in setup.py where it is told to include an extension and doesn't, that won't be caught.
Putting stuff in setup.cfg has problems like in #1487 and then means the information is now in two places. Additionally setup.cfg is also there for the end users and provides a place for them to specify additional compilation options such as defines and libraries. (As in a generic thing that has been part of distutils forever.)
I did try moving stuff to pyproject.toml \[project\] and it doesn't work because setuptools then starts complaining a lot, as I play a game of whack a mole moving the various keys and values.
The fundamental issue here is that cibuildwheel is not allowing for a name == main block containing the setup call. I'll rename the ticket. The suggested fixes are one of:
* Close as *wontfix*
* Close by documenting the name == main limitation in CIBW_PROJECT_REQUIRES_PYTHON section
* Make name == main work | 2023-09-13T19:47:54 |
pypa/cibuildwheel | 1,617 | pypa__cibuildwheel-1617 | [
"1609",
"1616"
] | 099d397aee3ec4bead60ae0c8a554d77b156c86c | diff --git a/cibuildwheel/environment.py b/cibuildwheel/environment.py
--- a/cibuildwheel/environment.py
+++ b/cibuildwheel/environment.py
@@ -124,8 +124,12 @@ def as_dictionary(
return environment
- def add(self, name: str, value: str) -> None:
- self.assignments.append(EnvironmentAssignmentRaw(name=name, value=value))
+ def add(self, name: str, value: str, prepend: bool = False) -> None:
+ assignment = EnvironmentAssignmentRaw(name=name, value=value)
+ if prepend:
+ self.assignments.insert(0, assignment)
+ else:
+ self.assignments.append(assignment)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({[repr(a) for a in self.assignments]!r})"
diff --git a/cibuildwheel/options.py b/cibuildwheel/options.py
--- a/cibuildwheel/options.py
+++ b/cibuildwheel/options.py
@@ -532,9 +532,9 @@ def build_options(self, identifier: str | None) -> BuildOptions:
# Pass through environment variables
if self.platform == "linux":
- for env_var_name in environment_pass:
+ for env_var_name in reversed(environment_pass):
with contextlib.suppress(KeyError):
- environment.add(env_var_name, self.env[env_var_name])
+ environment.add(env_var_name, self.env[env_var_name], prepend=True)
if dependency_versions == "pinned":
dependency_constraints: None | (
| diff --git a/unit_test/options_test.py b/unit_test/options_test.py
--- a/unit_test/options_test.py
+++ b/unit_test/options_test.py
@@ -255,6 +255,25 @@ def test_container_engine_option(tmp_path: Path, toml_assignment, result_name, r
assert parsed_container_engine.create_args == result_create_args
+def test_environment_pass_references():
+ options = Options(
+ platform="linux",
+ command_line_arguments=CommandLineArguments.defaults(),
+ env={
+ "CIBW_ENVIRONMENT_PASS_LINUX": "STARTER MAIN_COURSE",
+ "STARTER": "green eggs",
+ "MAIN_COURSE": "ham",
+ "CIBW_ENVIRONMENT": 'MEAL="$STARTER and $MAIN_COURSE"',
+ },
+ )
+ parsed_environment = options.build_options(identifier=None).environment
+ assert parsed_environment.as_dictionary(prev_environment={}) == {
+ "MEAL": "green eggs and ham",
+ "STARTER": "green eggs",
+ "MAIN_COURSE": "ham",
+ }
+
+
@pytest.mark.parametrize(
("toml_assignment", "result_name", "result_args"),
[
| Environment pass before environment assignment
### Discussed in https://github.com/pypa/cibuildwheel/discussions/1609
<div type='discussions-op-text'>
<sup>Originally posted by **hombit** September 12, 2023</sup>
It looks like currently environment is set by `CIBW_ENVIRONMENT` before it is being updated from `CIBW_ENVIRONMENT_PASS_LINUX`. Could I make it work in the reverse order? I'm trying to do something like that:
```
CIBW_ENVIRONMENT="A=$B"
CIBW_ENVIRONMENT_PASS_LINUX=B
```</div>
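A toy model (not cibuildwheel's actual evaluation code) of why the ordering matters:
```python
# Toy model: each value may reference variables defined by *earlier* assignments,
# so pass-through variables must come before CIBW_ENVIRONMENT assignments that
# reference them.
def expand(assignments):
    env = {}
    for name, template in assignments:
        value = template
        for known, known_value in env.items():
            value = value.replace(f"${known}", known_value)
        env[name] = value
    return env

# Old order: A=$B is expanded before B exists, so the reference is lost.
print(expand([("A", "$B"), ("B", "from-host")]))  # {'A': '$B', 'B': 'from-host'}

# Pass-through prepended: B is available when A is expanded.
print(expand([("B", "from-host"), ("A", "$B")]))  # {'B': 'from-host', 'A': 'from-host'}
```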
| 2023-09-18T16:10:51 |
|
pypa/cibuildwheel | 1,620 | pypa__cibuildwheel-1620 | [
"1320"
] | 099d397aee3ec4bead60ae0c8a554d77b156c86c | diff --git a/cibuildwheel/oci_container.py b/cibuildwheel/oci_container.py
--- a/cibuildwheel/oci_container.py
+++ b/cibuildwheel/oci_container.py
@@ -17,7 +17,13 @@
from typing import IO, Dict, Literal
from .typing import PathOrStr, PopenBytes
-from .util import CIProvider, call, detect_ci_provider, parse_key_value_string
+from .util import (
+ CIProvider,
+ call,
+ detect_ci_provider,
+ parse_key_value_string,
+ strtobool,
+)
ContainerEngineName = Literal["docker", "podman"]
@@ -188,12 +194,14 @@ def __exit__(
assert isinstance(self.name, str)
- subprocess.run(
- [self.engine.name, "rm", "--force", "-v", self.name],
- stdout=subprocess.DEVNULL,
- check=False,
- )
- self.name = None
+ keep_container = strtobool(os.environ.get("CIBW_DEBUG_KEEP_CONTAINER", ""))
+ if not keep_container:
+ subprocess.run(
+ [self.engine.name, "rm", "--force", "-v", self.name],
+ stdout=subprocess.DEVNULL,
+ check=False,
+ )
+ self.name = None
def copy_into(self, from_path: Path, to_path: PurePath) -> None:
# `docker cp` causes 'no space left on device' error when
| Option to leave OCI container alive for debugging after build
### Description
When experimenting with the local setup, using `cibuildwheel --platform=linux`, for example, a container is created. The container is removed after the build finishes. If the build fails, there's no way to debug what caused it except for the logs. The roundtrip time of adding debug logs is pretty high for big software projects. For myself I've commented out these lines:
https://github.com/pypa/cibuildwheel/blob/f7669045f52f7ffc85ff6a3534b61ea90c00ad1b/cibuildwheel/oci_container.py#L148-L152
But I could PR a proper solution if someone talks me through the requirements, and if everyone agrees this is a good addition for debugging.
### Build log
_No response_
### CI config
_No response_
| @henryiii sorry to tag you, but 1 year later, and still willing to PR this ^_^
I'd be okay with an environment variable to disable the cleanup. CIBW_CONTAINER_DEBUG or something like that? @joerick, @mayeut?
Yeah, that would be fine. CIBW_DEBUG_KEEP_CONTAINER might make sense. IMO it doesn't have to go through all the options.py machinery, it can be a simple `os.environ.get`, as we don't care about reading from TOML etc. I think we could add a little section to the bottom of the options documentation page to note little extra options that are more for debugging. The other one that doesn't fit with the other options is CIBW_CACHE_PATH, that could be documented there too. | 2023-09-19T10:02:05 |
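A minimal sketch of that approach (the helper shown is illustrative, not cibuildwheel API; the merged patch above reads the same variable via `strtobool`):
```python
# Illustrative guard: skip container cleanup when the debug variable is set.
import os

def should_keep_container() -> bool:
    # Hypothetical helper; any truthy value means "keep the container around".
    return os.environ.get("CIBW_DEBUG_KEEP_CONTAINER", "").lower() in {"1", "true", "yes", "on"}
```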
|
pypa/cibuildwheel | 1,621 | pypa__cibuildwheel-1621 | [
"1600"
] | 099d397aee3ec4bead60ae0c8a554d77b156c86c | diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -731,23 +731,26 @@ def parse_key_value_string(
all_field_names = [*positional_arg_names, *kw_arg_names]
- shlexer = shlex.shlex(key_value_string, posix=True, punctuation_chars=";:")
+ shlexer = shlex.shlex(key_value_string, posix=True, punctuation_chars=";")
shlexer.commenters = ""
+ shlexer.whitespace_split = True
parts = list(shlexer)
# parts now looks like
- # ['docker', ';', 'create_args',':', '--some-option=value', 'another-option']
+ # ['docker', ';', 'create_args:', '--some-option=value', 'another-option']
# split by semicolon
fields = [list(group) for k, group in itertools.groupby(parts, lambda x: x == ";") if not k]
result: dict[str, list[str]] = defaultdict(list)
for field_i, field in enumerate(fields):
- if len(field) > 1 and field[1] == ":":
- field_name = field[0]
- values = field[2:]
+ # check to see if the option name is specified
+ field_name, sep, first_value = field[0].partition(":")
+ if sep:
if field_name not in all_field_names:
msg = f"Failed to parse {key_value_string!r}. Unknown field name {field_name!r}"
raise ValueError(msg)
+
+ values = ([first_value] if first_value else []) + field[1:]
else:
try:
field_name = positional_arg_names[field_i]
| diff --git a/unit_test/utils_test.py b/unit_test/utils_test.py
--- a/unit_test/utils_test.py
+++ b/unit_test/utils_test.py
@@ -9,6 +9,7 @@
find_compatible_wheel,
fix_ansi_codes_for_github_actions,
format_safe,
+ parse_key_value_string,
prepare_command,
)
@@ -124,3 +125,84 @@ def test_fix_ansi_codes_for_github_actions():
output = fix_ansi_codes_for_github_actions(input)
assert output == expected
+
+
+def test_parse_key_value_string():
+ assert parse_key_value_string("bar", positional_arg_names=["foo"]) == {"foo": ["bar"]}
+ assert parse_key_value_string("foo:bar", kw_arg_names=["foo"]) == {"foo": ["bar"]}
+ with pytest.raises(ValueError, match="Too many positional arguments"):
+ parse_key_value_string("bar")
+ with pytest.raises(ValueError, match="Unknown field name"):
+ parse_key_value_string("foo:bar")
+ assert parse_key_value_string("foo:bar", kw_arg_names=["foo"]) == {"foo": ["bar"]}
+ assert parse_key_value_string("foo:bar", positional_arg_names=["foo"]) == {"foo": ["bar"]}
+ assert parse_key_value_string("foo: bar", kw_arg_names=["foo"]) == {"foo": ["bar"]}
+ assert parse_key_value_string("foo: bar", kw_arg_names=["foo"]) == {"foo": ["bar"]}
+ assert parse_key_value_string("foo: bar; baz: qux", kw_arg_names=["foo", "baz"]) == {
+ "foo": ["bar"],
+ "baz": ["qux"],
+ }
+
+ # some common options
+ assert parse_key_value_string(
+ "docker; create_args: --some-option --another-option=foo",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["--some-option", "--another-option=foo"],
+ }
+ # semicolon in value
+ assert parse_key_value_string(
+ "docker; create_args: --some-option='this; that'",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["--some-option=this; that"],
+ }
+ # colon in value
+ assert parse_key_value_string(
+ "docker; create_args: --mount a:b",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["--mount", "a:b"],
+ }
+ assert parse_key_value_string(
+ "docker;create_args:--mount a:b",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["--mount", "a:b"],
+ }
+ # quoted value with spaces
+ assert parse_key_value_string(
+ "docker;create_args:'some string with spaces'",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["some string with spaces"],
+ }
+
+ # colon in positional value
+ assert parse_key_value_string(
+ "docker; --mount a:b",
+ positional_arg_names=["name", "create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": ["--mount", "a:b"],
+ }
+
+ # empty option gives empty array
+ assert parse_key_value_string(
+ "docker;create_args:",
+ positional_arg_names=["name"],
+ kw_arg_names=["create_args"],
+ ) == {
+ "name": ["docker"],
+ "create_args": [],
+ }
| `CIBW_CONTAINER_ENGINE` splits on `:` when it shouldn't
### Description
`CIBW_CONTAINER_ENGINE="docker; create_args: -p=8080:8080"`
gets parsed into
```
container_engine:
name: docker
create_args: ['-p=8080', ':', '8080']
```
problem is here https://github.com/pypa/cibuildwheel/blob/e5d0cc0b0d37d20a053b85112e4e1f137079c85e/cibuildwheel/util.py#L710
potential fix from [here](https://stackoverflow.com/a/61737902/9045206):
```python
shlexer = shlex.shlex(key_value_string, posix=True, punctuation_chars=True)
shlexer.wordchars += ':'
```
with this change, `CIBW_CONTAINER_ENGINE="docker; create_args : -p=8080:8080"` (note the space after `create_args`) parses to `fields=[['docker'], ['create_args', ':', '-p=8080:8080']]`.
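To make the tokenisation difference concrete, a standard-library-only sketch comparing the old and new `shlex` settings (the new settings mirror the patch above):
```python
import shlex

s = "docker; create_args: -p=8080:8080"

# Old settings: ':' is punctuation, so the port mapping is split apart.
old = shlex.shlex(s, posix=True, punctuation_chars=";:")
old.commenters = ""
print(list(old))  # ['docker', ';', 'create_args', ':', '-p=8080', ':', '8080']

# New settings: only ';' is punctuation and words split on whitespace,
# so '-p=8080:8080' survives as a single token.
new = shlex.shlex(s, posix=True, punctuation_chars=";")
new.commenters = ""
new.whitespace_split = True
print(list(new))  # ['docker', ';', 'create_args:', '-p=8080:8080']
```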
| Ah, that's unfortunate. A workaround would be to quote the create_args, e.g. `CIBW_CONTAINER_ENGINE="docker; create_args: '-p=8080:8080'"`. However, I'll work on a fix too. | 2023-09-19T13:10:11 |
pypa/cibuildwheel | 1,687 | pypa__cibuildwheel-1687 | [
"1683"
] | 796361d9f98c2b986f9d61bca9c116695aa5a1ca | diff --git a/cibuildwheel/__main__.py b/cibuildwheel/__main__.py
--- a/cibuildwheel/__main__.py
+++ b/cibuildwheel/__main__.py
@@ -160,7 +160,7 @@ def main() -> None:
# This is now the new package dir
args.package_dir = project_dir.resolve()
- with chdir(temp_dir):
+ with chdir(project_dir):
build_in_directory(args)
finally:
# avoid https://github.com/python/cpython/issues/86962 by performing
| diff --git a/test/test_from_sdist.py b/test/test_from_sdist.py
--- a/test/test_from_sdist.py
+++ b/test/test_from_sdist.py
@@ -61,10 +61,23 @@ def test_simple(tmp_path):
sdist_dir.mkdir()
sdist_path = make_sdist(basic_project, sdist_dir)
+ setup_py_assertion_snippet = textwrap.dedent(
+ """
+ import os
+
+ assert os.path.exists('setup.py')
+ assert os.path.exists('{package}/setup.py')
+ """,
+ )
+ setup_py_assertion_cmd = f'python3 -c "{setup_py_assertion_snippet !s}"'
+
# build the wheels from sdist
actual_wheels = cibuildwheel_from_sdist_run(
sdist_path,
- add_env={"CIBW_BUILD": "cp39-*"},
+ add_env={
+ "CIBW_BEFORE_BUILD": setup_py_assertion_cmd,
+ "CIBW_BUILD": "cp39-*",
+ },
)
# check that the expected wheels are produced
| [FR] Expose `{package}` placeholder to the build stage
### Description
I mentioned before that I set the `PIP_CONSTRAINT` env var when building wheels, to improve the reproducibility (https://github.com/pypa/cibuildwheel/issues/1666).
I got that integrated into yarl, and it works well when I use my https://github.com/re-actors/checkout-python-sdist action to check out the project from sdist instead of Git.
Later on, I learned that cibuildwheel can consume `*.tar.gz` files directly, so I figured why not try that out. There's no huge practical gain in my case, since the cibuildwheel action combined with my action already achieves this. And this doesn't reduce the number of steps in the job, just replaces one thing with another. But I wanted to see it in action.
Long story short, it didn't actually work. The first obstacle was figuring out how to pass an sdist into the action. I read the source and found out that there's an input called `package-dir` for passing sdists :exploding_head: (https://github.com/pypa/cibuildwheel/issues/1682). I replaced my action with a simple download, but I only had a wildcard for the tarball name — and the action quotes the input internally so it wouldn't be auto-expanded. So I had to add some supporting code to look up the actual sdist filename (which is fine — I wanted to do that at some point anyway).
I thought, that would be the end of it, but it crashed on the build step, with all the above setup! Apparently, since I was no longer checking out the project to the CWD, the relative path in the `PIP_CONSTRAINT` variable was pointing to a non-existent location :man_shrugging:
Oh, well, I thought I'd find something in the docs. And I did find mentions of some placeholders. I tried out `{project}` (confusing where it's supposed to point to) and `{package}` but the internal `pip install` was still reporting a “file not found”, with those placeholders remaining non-rendered, as is.
Later, I found the notes at the very bottom of the options page, mentioning that not all settings interpolate values. And realized that maybe, it's just not implemented.
So here I am, filing this feature request to make it work. While doing so, I realized that while implementing this (with the placeholder pointing to a temporary directory where the sdist is unpacked) will likely fix it for me (unless, new issues arise at later stages, like having to switch the tests dir path to `{package}`, I suppose).
But then, is it really worth it? Is it the best UX? After all, the thing I had was already doing what I needed, following KISS / DRY and typical *NIX composability considerations. Maybe, cibuildwheel (the action, not the PyPI dist!) should really delegate this to `checkout-python-sdist` instead of complicating the setup. Or, maybe, it should just call the action internally, bypassing the corresponding inputs there. WDYT?
The PR is here, if you're curious: https://github.com/aio-libs/yarl/pull/967. Though, I'll probably keep using my action that is a bit more generic, and I use it in other jobs (like tests) as well.
### Build log
_No response_
### CI config
_No response_
| When cibuildwheel builds an sdist, it changes working directory to the expanded tarball before doing the build. So, I would expect a relative path in PIP_CONSTRAINT to (kinda) work - the issues in #1675 notwithstanding.
When building an sdist, `{project}` and `{package}` are the same, the only time they're different is when the user runs cibuildwheel on a subdirectory of a project.
Given that bashlex is already parsing our CIBW_ENVIRONMENT option, I'd be inclined not to add the curly-brace expansion there too, the bash syntax is complex enough as it is :) There is perhaps some argument to expand the env vars set by cibuildwheel to include `CIBUILDWHEEL_PROJECT` and `CIBUILDWHEEL_PACKAGE`, so they can be referenced by other variables.
Having said all that I would still expect a relative path to work. I can't see the commit that tried that in your PR, perhaps due to a force-push?
> Having said all that I would still expect a relative path to work. I can't see the commit that tried that in your PR, perhaps due to a force-push?
Yeah, that was the very first iteration. Let me double-check, then.
So it's failing trying to provision an ephemeral PEP 517 build env: https://github.com/aio-libs/yarl/actions/runs/7013832623/job/19080617779?pr=967#step:5:273.
ah, got it. Then I wonder if `build` is changing the CWD...
Ah, nope, that's not it. Actually my statement above is incorrect.
https://github.com/pypa/cibuildwheel/blob/4cedf1c603388572c4cc00e9101f0f6193833b93/cibuildwheel/__main__.py#L163
cibuildwheel doesn't chdir to the expanded sdist, it chdirs one level up. Looks like a bug to me, I can't think why anyone would want that - the `temp_dir` is an implementation detail of cibuildwheel.
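A self-contained sketch of the difference (using the standard library's `contextlib.chdir`, Python 3.11+, rather than cibuildwheel's internal helper; paths are illustrative):
```python
# Demonstrates why relative paths (e.g. in PIP_CONSTRAINT) break when the CWD is
# the temp dir's parent instead of the unpacked project itself.
import contextlib
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as temp_dir:
    project_dir = Path(temp_dir) / "pkg-1.0"  # where the sdist gets unpacked
    project_dir.mkdir()
    (project_dir / "setup.py").touch()

    with contextlib.chdir(temp_dir):          # old behaviour
        print(Path("setup.py").exists())      # False - relative paths don't resolve

    with contextlib.chdir(project_dir):       # fixed behaviour
        print(Path("setup.py").exists())      # True
```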
If you have time to put in a PR for that, that would be appreciated.
| 2023-12-05T22:45:59 |
pypa/cibuildwheel | 1,854 | pypa__cibuildwheel-1854 | [
"1855"
] | 877d3bf6498044bf8781377171fe46f231cc2f73 | diff --git a/cibuildwheel/macos.py b/cibuildwheel/macos.py
--- a/cibuildwheel/macos.py
+++ b/cibuildwheel/macos.py
@@ -34,6 +34,7 @@
detect_ci_provider,
download,
find_compatible_wheel,
+ free_thread_enable_313,
get_build_verbosity_extra_flags,
get_pip_version,
install_certifi_script,
@@ -115,12 +116,13 @@ def get_python_configurations(
return python_configurations
-def install_cpython(tmp: Path, version: str, url: str) -> Path:
- installation_path = Path(f"/Library/Frameworks/Python.framework/Versions/{version}")
+def install_cpython(tmp: Path, version: str, url: str, free_threading: bool) -> Path:
+ ft = "T" if free_threading else ""
+ installation_path = Path(f"/Library/Frameworks/Python{ft}.framework/Versions/{version}")
with FileLock(CIBW_CACHE_PATH / f"cpython{version}.lock"):
installed_system_packages = call("pkgutil", "--pkgs", capture_stdout=True).splitlines()
# if this version of python isn't installed, get it from python.org and install
- python_package_identifier = f"org.python.Python.PythonFramework-{version}"
+ python_package_identifier = f"org.python.Python.Python{ft}Framework-{version}"
if python_package_identifier not in installed_system_packages:
if detect_ci_provider() is None:
# if running locally, we don't want to install CPython with sudo
@@ -137,13 +139,22 @@ def install_cpython(tmp: Path, version: str, url: str) -> Path:
# download the pkg
download(url, pkg_path)
# install
- call("sudo", "installer", "-pkg", pkg_path, "-target", "/")
+ args = []
+ if version.startswith("3.13"):
+ # Python 3.13 is the first version to have a free-threading option
+ args += ["-applyChoiceChangesXML", str(free_thread_enable_313.resolve())]
+ call("sudo", "installer", "-pkg", pkg_path, *args, "-target", "/")
pkg_path.unlink()
env = os.environ.copy()
env["PIP_DISABLE_PIP_VERSION_CHECK"] = "1"
- call(installation_path / "bin" / "python3", install_certifi_script, env=env)
- return installation_path / "bin" / "python3"
+ if free_threading:
+ call(installation_path / f"bin/python{version}t", "-m", "ensurepip", env=env)
+ call(installation_path / f"bin/python{version}t", install_certifi_script, env=env)
+ else:
+ call(installation_path / "bin/python3", install_certifi_script, env=env)
+
+ return installation_path / "bin" / (f"python{version}t" if free_threading else "python3")
def install_pypy(tmp: Path, url: str) -> Path:
@@ -172,13 +183,19 @@ def setup_python(
implementation_id = python_configuration.identifier.split("-")[0]
log.step(f"Installing Python {implementation_id}...")
if implementation_id.startswith("cp"):
- base_python = install_cpython(tmp, python_configuration.version, python_configuration.url)
+ free_threading = "t-macos" in python_configuration.identifier
+ base_python = install_cpython(
+ tmp, python_configuration.version, python_configuration.url, free_threading
+ )
+
elif implementation_id.startswith("pp"):
base_python = install_pypy(tmp, python_configuration.url)
else:
msg = "Unknown Python implementation"
raise ValueError(msg)
- assert base_python.exists()
+ assert (
+ base_python.exists()
+ ), f"{base_python.name} not found, has {list(base_python.parent.iterdir())}"
log.step("Setting up build environment...")
venv_path = tmp / "venv"
@@ -244,8 +261,25 @@ def setup_python(
# Set MACOSX_DEPLOYMENT_TARGET, if the user didn't set it.
# For arm64, the minimal deployment target is 11.0.
# On x86_64 (or universal2), use 10.9 as a default.
- # PyPy defaults to 10.7, causing inconsistencies if it's left unset.
- env.setdefault("MACOSX_DEPLOYMENT_TARGET", "11.0" if config_is_arm64 else "10.9")
+ # CPython 3.13 needs 10.13.
+ if config_is_arm64:
+ default_target = "11.0"
+ elif Version(python_configuration.version) >= Version("3.13"):
+ default_target = "10.13"
+ elif python_configuration.identifier.startswith("pp") and Version(
+ python_configuration.version
+ ) >= Version("3.9"):
+ default_target = "10.15"
+ else:
+ default_target = "10.9"
+ env.setdefault("MACOSX_DEPLOYMENT_TARGET", default_target)
+
+ # This is a floor, it can't be set lower than the default_target.
+ if Version(env["MACOSX_DEPLOYMENT_TARGET"]) < Version(default_target):
+ log.warning(
+ f"Bumping MACOSX_DEPLOYMENT_TARGET ({env['MACOSX_DEPLOYMENT_TARGET']}) to the minimum required ({default_target})."
+ )
+ env["MACOSX_DEPLOYMENT_TARGET"] = default_target
if python_configuration.version not in {"3.6", "3.7"}:
if config_is_arm64:
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -59,6 +59,8 @@
install_certifi_script: Final[Path] = resources_dir / "install_certifi.py"
+free_thread_enable_313: Final[Path] = resources_dir / "free-threaded-enable-313.xml"
+
test_fail_cwd_file: Final[Path] = resources_dir / "testing_temp_dir_file.py"
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -82,6 +82,8 @@ jobs:
output-dir: wheelhouse
env:
CIBW_ARCHS_MACOS: x86_64 universal2 arm64
+ CIBW_FREE_THREADED_SUPPORT: 1
+ CIBW_PRERELEASE_PYTHONS: 1
- name: Run a sample build (GitHub Action, only)
uses: ./
diff --git a/test/test_macos_archs.py b/test/test_macos_archs.py
--- a/test/test_macos_archs.py
+++ b/test/test_macos_archs.py
@@ -14,7 +14,7 @@
*utils.expected_wheels("spam", "0.1.0", machine_arch="arm64", include_universal2=True),
}
-DEPLOYMENT_TARGET_TOO_LOW_WARNING = "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value"
+DEPLOYMENT_TARGET_TOO_LOW_WARNING = "Bumping MACOSX_DEPLOYMENT_TARGET"
def get_xcode_version() -> tuple[int, int]:
diff --git a/test/utils.py b/test/utils.py
--- a/test/utils.py
+++ b/test/utils.py
@@ -137,17 +137,11 @@ def cibuildwheel_run(
return wheels
-def _get_arm64_macosx_deployment_target(macosx_deployment_target: str) -> str:
+def _floor_macosx(*args: str) -> str:
"""
- The first version of macOS that supports arm is 11.0. So the wheel tag
- cannot contain an earlier deployment target, even if
- MACOSX_DEPLOYMENT_TARGET sets it.
+ Make sure a deployment target is not less than some value.
"""
- version_tuple = tuple(map(int, macosx_deployment_target.split(".")))
- if version_tuple <= (11, 0):
- return "11.0"
- else:
- return macosx_deployment_target
+ return max(args, key=lambda x: tuple(map(int, x.split("."))))
def expected_wheels(
@@ -202,9 +196,8 @@ def expected_wheels(
"cp311-cp311",
"cp312-cp312",
"cp313-cp313",
+ "cp313-cp313t",
]
- if platform != "macos":
- python_abi_tags.append("cp313-cp313t")
if machine_arch in ["x86_64", "AMD64", "x86", "aarch64"]:
python_abi_tags += [
@@ -223,6 +216,7 @@ def expected_wheels(
"cp311-cp311",
"cp312-cp312",
"cp313-cp313",
+ "cp313-cp313t",
"pp38-pypy38_pp73",
"pp39-pypy39_pp73",
"pp310-pypy310_pp73",
@@ -282,12 +276,19 @@ def expected_wheels(
elif platform == "macos":
if machine_arch == "arm64":
- arm64_macosx_deployment_target = _get_arm64_macosx_deployment_target(
- macosx_deployment_target
- )
- platform_tags = [f'macosx_{arm64_macosx_deployment_target.replace(".", "_")}_arm64']
+ arm64_macosx = _floor_macosx(macosx_deployment_target, "11.0")
+ platform_tags = [f'macosx_{arm64_macosx.replace(".", "_")}_arm64']
else:
- platform_tags = [f'macosx_{macosx_deployment_target.replace(".", "_")}_x86_64']
+ if python_abi_tag.startswith("pp") and not python_abi_tag.startswith(
+ ("pp37", "pp38")
+ ):
+ pypy_macosx = _floor_macosx(macosx_deployment_target, "10.15")
+ platform_tags = [f'macosx_{pypy_macosx.replace(".", "_")}_x86_64']
+ elif python_abi_tag.startswith("cp313"):
+ pypy_macosx = _floor_macosx(macosx_deployment_target, "10.13")
+ platform_tags = [f'macosx_{pypy_macosx.replace(".", "_")}_x86_64']
+ else:
+ platform_tags = [f'macosx_{macosx_deployment_target.replace(".", "_")}_x86_64']
if include_universal2:
platform_tags.append(
| Min macOS version
### Description
There are two changes in b2; the free-threaded one is something we already knew about, but also the minimum macOS version has finally been bumped from 10.9 to 10.13. This brings up the question: how should we deal with it? For the least change, we can manually force MACOSX_DEPLOYMENT_TARGET to 10.13 if we detect it set lower than 10.13, including the default 10.9. The other option is we could set the minimum across the board to 10.13, but users could still set it lower (except for Python 3.13+, where we'd still need the above clamping). Thoughts? 10.13 brings nearly complete C++17 support (completed in 10.14, started in 10.12).
(Not at https://www.python.org/api/v2/downloads/release/?is_published=true yet, guessing it's a bit too soon to show up (https://www.python.org/api/v2/downloads/release/?is_published=true&name=Python%203.13.0b2 is empty)? Not listed on python.org yet unless you know where to look - https://www.python.org/downloads/release/python-3130b2/)
Followup to #1831.
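A small sketch of the clamping idea (the helper name is illustrative; the patch does the comparison with `packaging.version.Version` and logs a warning when it bumps the value):
```python
# Illustrative clamp: never let the deployment target drop below the floor that
# the interpreter build requires. Not cibuildwheel API.
from packaging.version import Version

def clamp_deployment_target(requested: str, floor: str) -> str:
    return requested if Version(requested) >= Version(floor) else floor

assert clamp_deployment_target("10.9", "10.13") == "10.13"  # bumped to the minimum
assert clamp_deployment_target("11.0", "10.13") == "11.0"   # user value kept
```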
### Build log
_No response_
### CI config
_No response_
| 2024-06-06T03:35:17 |
|
elastic/helm-charts | 516 | elastic__helm-charts-516 | [
"492"
] | 58bdd96312a08cf3e933ffa9afb3c7988c27dd80 | diff --git a/helpers/helpers.py b/helpers/helpers.py
--- a/helpers/helpers.py
+++ b/helpers/helpers.py
@@ -9,7 +9,7 @@ def helm_template(config):
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "w") as values:
values.write(config)
- helm_cmd = "helm template -f {0} --namespace default ./".format(temp.name)
+ helm_cmd = "helm template release-name -f {0} ./".format(temp.name)
result = yaml.load_all(check_output(helm_cmd.split()))
results = {}
| diff --git a/elasticsearch/tests/elasticsearch_test.py b/elasticsearch/tests/elasticsearch_test.py
--- a/elasticsearch/tests/elasticsearch_test.py
+++ b/elasticsearch/tests/elasticsearch_test.py
@@ -445,7 +445,10 @@ def test_enabling_persistence_label_in_volumeclaimtemplate():
"volumeClaimTemplates"
][0]["metadata"]["labels"]
statefulset_labels = r["statefulset"][uname]["metadata"]["labels"]
- assert volume_claim_template_labels == statefulset_labels
+ expected_labels = statefulset_labels
+ # heritage label shouldn't be present in volumeClaimTemplates labels
+ expected_labels.pop("heritage")
+ assert volume_claim_template_labels == expected_labels
def test_adding_a_secret_mount():
| helm upgrade fails due to 'cannot patch "elasticsearch-master" with kind StatefulSet'
**Chart version:**
7.6.0
**Kubernetes version:**
v1.14.9-eks-c0eccc
**Kubernetes provider:** E.g. GKE (Google Kubernetes Engine)
EKS
**Helm Version:**
v3.0.2
**`helm get release` output**
<details>
<summary>Output of helm get release</summary>
```
NAME: elasticsearch
LAST DEPLOYED: Fri Feb 21 16:30:05 2020
NAMESPACE: elasticsearch
STATUS: failed
REVISION: 29
USER-SUPPLIED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
extraInitContainers: ""
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.6.0
ingress:
annotations: {}
enabled: false
hosts:
- elasticsearch.local
path: /
tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector: {}
persistence:
annotations: {}
enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
podSecurityPolicy:
create: false
name: ""
spec:
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
priorityClassName: ""
protocol: https
rbac:
create: false
serviceAccountName: ""
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
replicas: 3
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
roles:
data: "true"
ingest: "true"
master: "true"
schedulerName: ""
secretMounts:
- name: elastic-certificates
path: /usr/share/elasticsearch/config/certs
secretName: elastic-certificates
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
service:
annotations: {}
httpPortName: http
labels: {}
labelsHeadless: {}
nodePort: ""
transportPortName: transport
type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
COMPUTED VALUES:
antiAffinity: hard
antiAffinityTopologyKey: kubernetes.io/hostname
clusterHealthCheckParams: wait_for_status=green&timeout=1s
clusterName: elasticsearch
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
esJavaOpts: -Xmx1g -Xms1g
esMajorVersion: ""
extraContainers: ""
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
extraInitContainers: ""
extraVolumeMounts: ""
extraVolumes: ""
fsGroup: ""
fullnameOverride: ""
httpPort: 9200
image: docker.elastic.co/elasticsearch/elasticsearch
imagePullPolicy: IfNotPresent
imagePullSecrets: []
imageTag: 7.6.0
ingress:
annotations: {}
enabled: false
hosts:
- elasticsearch.local
path: /
tls: []
initResources: {}
keystore: []
labels: {}
lifecycle: {}
masterService: ""
masterTerminationFix: false
maxUnavailable: 1
minimumMasterNodes: 2
nameOverride: ""
networkHost: 0.0.0.0
nodeAffinity: {}
nodeGroup: master
nodeSelector: {}
persistence:
annotations: {}
enabled: true
podAnnotations: {}
podManagementPolicy: Parallel
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
podSecurityPolicy:
create: false
name: ""
spec:
fsGroup:
rule: RunAsAny
privileged: true
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
priorityClassName: ""
protocol: https
rbac:
create: false
serviceAccountName: ""
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
replicas: 3
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
roles:
data: "true"
ingest: "true"
master: "true"
schedulerName: ""
secretMounts:
- name: elastic-certificates
path: /usr/share/elasticsearch/config/certs
secretName: elastic-certificates
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
service:
annotations: {}
httpPortName: http
labels: {}
labelsHeadless: {}
nodePort: ""
transportPortName: transport
type: ClusterIP
sidecarResources: {}
sysctlInitContainer:
enabled: true
sysctlVmMaxMapCount: 262144
terminationGracePeriod: 120
tolerations: []
transportPort: 9300
updateStrategy: RollingUpdate
volumeClaimTemplate:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
HOOKS:
---
# Source: elasticsearch/templates/test/test-elasticsearch-health.yaml
apiVersion: v1
kind: Pod
metadata:
name: "elasticsearch-sbxrc-test"
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: "elasticsearch-ualfr-test"
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
command:
- "sh"
- "-c"
- |
#!/usr/bin/env bash -e
curl -XGET --fail 'elasticsearch-master:9200/_cluster/health?wait_for_status=green&timeout=1s'
restartPolicy: Never
MANIFEST:
---
# Source: elasticsearch/templates/poddisruptionbudget.yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: "elasticsearch-master-pdb"
spec:
maxUnavailable: 1
selector:
matchLabels:
app: "elasticsearch-master"
---
# Source: elasticsearch/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: elasticsearch-master-config
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
data:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master-headless
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve
# Create endpoints also if the related pod isn't ready
publishNotReadyAddresses: true
selector:
app: "elasticsearch-master"
ports:
- name: http
port: 9200
- name: transport
port: 9300
---
# Source: elasticsearch/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
{}
spec:
type: ClusterIP
selector:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
ports:
- name: http
protocol: TCP
port: 9200
- name: transport
protocol: TCP
port: 9300
---
# Source: elasticsearch/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-master
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
esMajorVersion: "7"
spec:
serviceName: elasticsearch-master-headless
selector:
matchLabels:
app: "elasticsearch-master"
replicas: 3
podManagementPolicy: Parallel
updateStrategy:
type: RollingUpdate
volumeClaimTemplates:
- metadata:
name: elasticsearch-master
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
template:
metadata:
name: "elasticsearch-master"
labels:
heritage: "Helm"
release: "elasticsearch"
chart: "elasticsearch"
app: "elasticsearch-master"
annotations:
configchecksum: a925349ed01ac0903a539d33164dabb0c174b9b602c943057c90033eee58253
spec:
securityContext:
fsGroup: 1000
runAsUser: 1000
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "elasticsearch-master"
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 120
volumes:
- name: elastic-certificates
secret:
secretName: elastic-certificates
- name: esconfig
configMap:
name: elasticsearch-master-config
initContainers:
- name: configure-sysctl
securityContext:
runAsUser: 0
privileged: true
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
imagePullPolicy: "IfNotPresent"
command: ["sysctl", "-w", "vm.max_map_count=262144"]
resources:
{}
containers:
- name: "elasticsearch"
securityContext:
capabilities:
drop:
- ALL
runAsNonRoot: true
runAsUser: 1000
image: "docker.elastic.co/elasticsearch/elasticsearch:7.6.0"
imagePullPolicy: "IfNotPresent"
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
exec:
command:
- sh
- -c
- |
#!/usr/bin/env bash -e
# If the node is starting up wait for the cluster to be ready (request params: 'wait_for_status=green&timeout=1s' )
# Once it has started only check that the node itself is responding
START_FILE=/tmp/.es_start_file
http () {
local path="${1}"
if [ -n "${ELASTIC_USERNAME}" ] && [ -n "${ELASTIC_PASSWORD}" ]; then
BASIC_AUTH="-u ${ELASTIC_USERNAME}:${ELASTIC_PASSWORD}"
else
BASIC_AUTH=''
fi
curl -XGET -s -k --fail ${BASIC_AUTH} https://127.0.0.1:9200${path}
}
if [ -f "${START_FILE}" ]; then
echo 'Elasticsearch is already running, lets check the node is healthy and there are master nodes available'
http "/_cluster/health?timeout=0s"
else
echo 'Waiting for elasticsearch cluster to become ready (request params: "wait_for_status=green&timeout=1s" )'
if http "/_cluster/health?wait_for_status=green&timeout=1s" ; then
touch ${START_FILE}
exit 0
else
echo 'Cluster is not yet ready (request params: "wait_for_status=green&timeout=1s" )'
exit 1
fi
fi
ports:
- name: http
containerPort: 9200
- name: transport
containerPort: 9300
resources:
limits:
cpu: 1000m
memory: 2Gi
requests:
cpu: 200m
memory: 2Gi
env:
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: cluster.initial_master_nodes
value: "elasticsearch-master-0,elasticsearch-master-1,elasticsearch-master-2,"
- name: discovery.seed_hosts
value: "elasticsearch-master-headless"
- name: cluster.name
value: "elasticsearch"
- name: network.host
value: "0.0.0.0"
- name: ES_JAVA_OPTS
value: "-Xmx1g -Xms1g"
- name: node.data
value: "true"
- name: node.ingest
value: "true"
- name: node.master
value: "true"
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: elastic-credentials
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
key: username
name: elastic-credentials
volumeMounts:
- name: "elasticsearch-master"
mountPath: /usr/share/elasticsearch/data
- name: elastic-certificates
mountPath: /usr/share/elasticsearch/config/certs
- name: esconfig
mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
subPath: elasticsearch.yml
NOTES:
1. Watch all cluster members come up.
$ kubectl get pods --namespace=elasticsearch -l app=elasticsearch-master -w
2. Test cluster health using Helm test.
$ helm test elasticsearch
```
NOTE: the image tags above show 7.6.0 because I have manually updated the StatefulSet as a workaround.
</details>
**Describe the bug:**
Performing `helm upgrade` returns the following error:
Error: UPGRADE FAILED: cannot patch "elasticsearch-master" with kind StatefulSet: StatefulSet.apps "elasticsearch-master" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden
**Steps to reproduce:**
1. helm install elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
2. helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
**Expected behavior:**
Successful upgrade to newer image
**Provide logs and/or server output (if relevant):**
```
cat values.yaml
---
clusterName: "elasticsearch"
nodeGroup: "master"
# The service that non master groups will try to connect to when joining the cluster
# This should be set to clusterName + "-" + nodeGroup for your master group
masterService: ""
# Elasticsearch roles that will be applied to this nodeGroup
# These will be set as environment variables. E.g. node.master=true
roles:
master: "true"
ingest: "true"
data: "true"
replicas: 3
minimumMasterNodes: 2
esMajorVersion: ""
# Allows you to add any config files in /usr/share/elasticsearch/config/
# such as elasticsearch.yml and log4j2.properties
esConfig:
elasticsearch.yml: |
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
network.host: 0.0.0.0
# log4j2.properties: |
# key = value
# Extra environment variables to append to this nodeGroup
# This will be appended to the current 'env:' key. You can use any of the kubernetes env
# syntax here
extraEnvs:
- name: ELASTIC_PASSWORD
valueFrom:
secretKeyRef:
name: elastic-credentials
key: password
- name: ELASTIC_USERNAME
valueFrom:
secretKeyRef:
name: elastic-credentials
key: username
# A list of secrets and their paths to mount inside the pod
# This is useful for mounting certificates for security and for mounting
# the X-Pack license
secretMounts:
- name: elastic-certificates
secretName: elastic-certificates
path: /usr/share/elasticsearch/config/certs
image: "docker.elastic.co/elasticsearch/elasticsearch"
imageTag: "7.6.0"
imagePullPolicy: "IfNotPresent"
podAnnotations: {}
# iam.amazonaws.com/role: es-cluster
# additionals labels
labels: {}
esJavaOpts: "-Xmx1g -Xms1g"
resources:
requests:
cpu: "200m"
memory: "2Gi"
limits:
cpu: "1000m"
memory: "2Gi"
initResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
sidecarResources: {}
# limits:
# cpu: "25m"
# # memory: "128Mi"
# requests:
# cpu: "25m"
# memory: "128Mi"
networkHost: "0.0.0.0"
volumeClaimTemplate:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 50Gi
rbac:
create: false
serviceAccountName: ""
podSecurityPolicy:
create: false
name: ""
spec:
privileged: true
fsGroup:
rule: RunAsAny
runAsUser:
rule: RunAsAny
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
volumes:
- secret
- configMap
- persistentVolumeClaim
persistence:
enabled: true
annotations: {}
extraVolumes: ""
# - name: extras
# emptyDir: {}
extraVolumeMounts: ""
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: ""
# - name: do-something
# image: busybox
# command: ['do', 'something']
# This is the PriorityClass settings as defined in
# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
priorityClassName: ""
# By default this will make sure two pods don't end up on the same node
# Changing this to a region would allow you to spread pods across regions
antiAffinityTopologyKey: "kubernetes.io/hostname"
# Hard means that by default pods will only be scheduled if there are enough nodes for them
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: "hard"
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
# The default is to deploy all pods serially. By setting this to parallel all pods are started at
# the same time when bootstrapping the cluster
podManagementPolicy: "Parallel"
protocol: https
httpPort: 9200
transportPort: 9300
service:
labels: {}
labelsHeadless: {}
type: ClusterIP
nodePort: ""
annotations: {}
httpPortName: http
transportPortName: transport
updateStrategy: RollingUpdate
# This is the max unavailable setting for the pod disruption budget
# The default value of 1 will make sure that kubernetes won't allow more than 1
# of your pods to be unavailable during maintenance
maxUnavailable: 1
podSecurityContext:
fsGroup: 1000
runAsUser: 1000
# The following value is deprecated,
# please use the above podSecurityContext.fsGroup instead
fsGroup: ""
securityContext:
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# How long to wait for elasticsearch to stop gracefully
terminationGracePeriod: 120
sysctlVmMaxMapCount: 262144
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 3
timeoutSeconds: 5
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html#request-params wait_for_status
clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
## Use an alternate scheduler.
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
schedulerName: ""
imagePullSecrets: []
nodeSelector: {}
tolerations: []
# Enabling this will publically expose your Elasticsearch instance.
# Only enable this if you have security enabled on your cluster
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- elasticsearch.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
nameOverride: ""
fullnameOverride: ""
# https://github.com/elastic/helm-charts/issues/63
masterTerminationFix: false
lifecycle: {}
# preStop:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
# postStart:
# exec:
# command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
sysctlInitContainer:
enabled: true
keystore: []
```
**Any additional context:**
I manually updated the statefulset to get around the problem, hoping helm would then recognise the new image version was in place, however that has gained me nothing. The upgrade still fails
| I have the same issue and am following this thread.
These Helm charts do not officially support Helm 3; I read that in the README.md, so consider this a friendly heads-up :) But I also want these charts to support Helm 3, so I will keep an eye on this issue 👍
A possible workaround is to use this chart with Helm 3 but to render it with `helm template` instead of `helm install/upgrade`. I will try that as well.
> These Helm charts do not officially support Helm 3.
@metabsd Thanks for the info.
How did your test with `helm template` go? I haven't got an environment to test in, given that I've manually updated the StatefulSet.
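For context, a commonly suggested workaround for this class of immutable-field error (untested here, so treat it as a sketch rather than a fix; the release name and namespace are taken from the report above, and older kubectl versions spell the flag `--cascade=false`) is to delete the StatefulSet object while orphaning its pods and PVCs, then re-run the upgrade so Helm recreates it with the new spec:
```
# Sketch only: orphan the pods/PVCs so no data is lost, then let Helm recreate the StatefulSet.
kubectl delete statefulset elasticsearch-master -n elasticsearch --cascade=orphan
helm upgrade elasticsearch elastic/elasticsearch -n elasticsearch -f values.yaml
```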
NVIDIA-Merlin/NVTabular | 77 | NVIDIA-Merlin__NVTabular-77 | [
"61"
] | 743949354f38ebb491d2cd5d077a4d61a3a4dc75 | diff --git a/nvtabular/dask/io.py b/nvtabular/dask/io.py
--- a/nvtabular/dask/io.py
+++ b/nvtabular/dask/io.py
@@ -20,6 +20,7 @@
import cudf
import cupy
+import dask_cudf
import numba.cuda as cuda
import numpy as np
import pyarrow.parquet as pq
@@ -145,7 +146,9 @@ class DaskDataset:
-----------
path : str or list of str
Dataset path (or list of paths). If string, should specify
- a specific file or directory path.
+ a specific file or directory path. If this is a directory
+ path, the directory structure must be flat (nested directories
+ are not yet supported).
engine : str or DatasetEngine
DatasetEngine object or string identifier of engine. Current
string options include: ("parquet").
@@ -159,17 +162,28 @@ class DaskDataset:
to GPU memory capacity). Ignored if part_size is passed
directly. Note that the underlying engine may allow other
custom kwargs to override this argument.
+ storage_options: None or dict
+ Further parameters to pass to the bytes backend.
**kwargs :
Other arguments to be passed to DatasetEngine.
"""
- def __init__(self, path, engine, part_size=None, part_mem_fraction=0.125, **kwargs):
+ def __init__(
+ self,
+ path,
+ engine=None,
+ part_size=None,
+ part_mem_fraction=None,
+ storage_options=None,
+ **kwargs,
+ ):
if part_size:
# If a specific partition size is given, use it directly
part_size = parse_bytes(part_size)
else:
# If a fractional partition size is given, calculate part_size
+ part_mem_fraction = part_mem_fraction or 0.125
assert part_mem_fraction > 0.0 and part_mem_fraction < 1.0
if part_mem_fraction > 0.25:
warnings.warn(
@@ -178,43 +192,57 @@ def __init__(self, path, engine, part_size=None, part_mem_fraction=0.125, **kwar
)
part_size = int(cuda.current_context().get_memory_info()[1] * part_mem_fraction)
+ # Engine-agnostic path handling
+ if hasattr(path, "name"):
+ path = stringify_path(path)
+ storage_options = storage_options or {}
+ fs, fs_token, paths = get_fs_token_paths(path, mode="rb", storage_options=storage_options)
+ paths = sorted(paths, key=natural_sort_key)
+
+ # If engine is not provided, try to infer from end of paths[0]
+ if engine is None:
+ engine = paths[0].split(".")[-1]
+
if isinstance(engine, str):
if engine == "parquet":
- self.engine = ParquetDatasetEngine(path, part_size, **kwargs)
+ self.engine = ParquetDatasetEngine(paths, part_size, fs, fs_token, **kwargs)
+ elif engine == "csv":
+ self.engine = CSVDatasetEngine(paths, part_size, fs, fs_token, **kwargs)
else:
- raise ValueError("Only parquet supported for now")
+ raise ValueError("Only parquet and csv supported (for now).")
else:
- self.engine = engine(path, part_size, **kwargs)
-
- def meta_empty(self, columns=None):
- return self.engine.meta_empty(columns=columns)
+ self.engine = engine(paths, part_size, fs, fs_token, **kwargs)
def to_ddf(self, columns=None):
return self.engine.to_ddf(columns=columns)
class DatasetEngine:
- """ DaskDataset Class
- Converts dataset `pieces` to a dask_cudf DataFrame
+ """ DatasetEngine Class
+
+ Base class for Dask-powered IO engines. Engines must provide
+ a ``to_ddf`` method.
"""
- def __init__(self, path, part_size):
- if hasattr(path, "name"):
- path = stringify_path(path)
- fs, _, paths = get_fs_token_paths(path, mode="rb")
- self.fs = fs
- self.paths = sorted(paths, key=natural_sort_key)
+ def __init__(self, paths, part_size, fs, fs_token):
+ self.paths = paths
self.part_size = part_size
-
- def meta_empty(self, columns=None):
- raise NotImplementedError(""" Return an empty cudf.DataFrame with the correct schema """)
+ self.fs = fs
+ self.fs_token = fs_token
def to_ddf(self, columns=None):
raise NotImplementedError(""" Return a dask_cudf.DataFrame """)
class ParquetDatasetEngine(DatasetEngine):
+ """ ParquetDatasetEngine
+
+ Dask-based version of cudf.read_parquet.
+ """
+
def __init__(self, *args, row_groups_per_part=None):
+ # TODO: Improve dask_cudf.read_parquet performance so that
+ # this class can be slimmed down.
super().__init__(*args)
self._metadata, self._base = self.get_metadata()
self._pieces = None
@@ -308,7 +336,7 @@ def meta_empty(self, columns=None):
def to_ddf(self, columns=None):
pieces = self.pieces
- name = "parquet-to-ddf-" + tokenize(pieces, columns)
+ name = "parquet-to-ddf-" + tokenize(self.fs_token, pieces, columns)
dsk = {
(name, p): (ParquetDatasetEngine.read_piece, piece, columns)
for p, piece in enumerate(pieces)
@@ -316,3 +344,25 @@ def to_ddf(self, columns=None):
meta = self.meta_empty(columns=columns)
divisions = [None] * (len(pieces) + 1)
return new_dd_object(dsk, name, meta, divisions)
+
+
+class CSVDatasetEngine(DatasetEngine):
+ """ CSVDatasetEngine
+
+ Thin wrapper around dask_cudf.read_csv.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args)
+ self._meta = {}
+ self.names = kwargs.pop("names", None)
+ self.csv_kwargs = kwargs
+ # CSV reader needs a list of files
+ # (Assume flat directory structure if this is a dir)
+ if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
+ self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
+
+ def to_ddf(self, columns=None):
+ return dask_cudf.read_csv(
+ self.paths, names=self.names, chunksize=self.part_size, **self.csv_kwargs
+ )[columns]
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -26,7 +26,7 @@
import nvtabular.ops as ops
from nvtabular import DaskDataset, Workflow
-from tests.conftest import mycols_pq
+from tests.conftest import allcols_csv, mycols_csv, mycols_pq
# LocalCluster Client Fixture
client = None
@@ -57,16 +57,29 @@ def _dummy_op_logic(gdf, target_columns, _id="dummy", **kwargs):
return new_gdf
[email protected]("engine", ["parquet"])
[email protected]("part_mem_fraction", [0.01, None])
[email protected]("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("freq_threshold", [0, 5])
-def test_dask_workflow_api_dlrm(dask_cluster, tmpdir, datasets, freq_threshold, engine):
+def test_dask_workflow_api_dlrm(
+ dask_cluster, tmpdir, datasets, freq_threshold, part_mem_fraction, engine
+):
paths = glob.glob(str(datasets[engine]) + "/*." + engine.split("-")[0])
- df1 = cudf.read_parquet(paths[0])[mycols_pq]
- df2 = cudf.read_parquet(paths[1])[mycols_pq]
+ if engine == "parquet":
+ df1 = cudf.read_parquet(paths[0])[mycols_pq]
+ df2 = cudf.read_parquet(paths[1])[mycols_pq]
+ elif engine == "csv":
+ df1 = cudf.read_csv(paths[0], header=0)[mycols_csv]
+ df2 = cudf.read_csv(paths[1], header=0)[mycols_csv]
+ else:
+ df1 = cudf.read_csv(paths[0], names=allcols_csv)[mycols_csv]
+ df2 = cudf.read_csv(paths[1], names=allcols_csv)[mycols_csv]
df0 = cudf.concat([df1, df2], axis=0)
- cat_names = ["name-cat", "name-string"]
+ if engine == "parquet":
+ cat_names = ["name-cat", "name-string"]
+ else:
+ cat_names = ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
@@ -78,7 +91,10 @@ def test_dask_workflow_api_dlrm(dask_cluster, tmpdir, datasets, freq_threshold,
processor.add_preprocess(ops.Categorify(freq_threshold=freq_threshold, out_path=str(tmpdir)))
processor.finalize()
- dataset = DaskDataset(paths, engine)
+ if engine in ("parquet", "csv"):
+ dataset = DaskDataset(paths, part_mem_fraction=part_mem_fraction)
+ else:
+ dataset = DaskDataset(paths, names=allcols_csv, part_mem_fraction=part_mem_fraction)
processor.apply(dataset, output_path=str(tmpdir))
result = processor.get_ddf().compute()
@@ -90,12 +106,16 @@ def test_dask_workflow_api_dlrm(dask_cluster, tmpdir, datasets, freq_threshold,
# Check category counts
if freq_threshold == 0:
- assert len(df0["name-cat"].unique()) == len(result["name-cat"].unique())
+ if engine == "parquet":
+ assert len(df0["name-cat"].unique()) == len(result["name-cat"].unique())
assert len(df0["name-string"].unique()) == len(result["name-string"].unique())
df0["_count"] = cupy.ones(len(df0))
result["_count"] = cupy.ones(len(result))
- for col in ["name-cat", "name-string"]:
+ cat_list = ["name-string"]
+ if engine == "parquet":
+ cat_list = ["name-cat", "name-string"]
+ for col in cat_list:
expect = df0.groupby(col, dropna=False).count()["_count"].sort_values("_count")
got = result.groupby(col, dropna=False).count()["_count"].sort_values("_count")
assert_eq(expect, got, check_index=False)
| [FEA] Implement CSV engine for `DaskDataset`
<a href="https://github.com/rjzamora"><img src="https://avatars0.githubusercontent.com/u/20461013?v=4" align="left" width="96" height="96" hspace="10"></img></a> **Issue by [rjzamora](https://github.com/rjzamora)**
_Thursday May 28, 2020 at 17:00 GMT_
_Originally opened as https://github.com/rapidsai/recsys/issues/181_
----
**Is your feature request related to a problem? Please describe.**
#136 introduces a new `DaskDataset` API for managing IO needs within a `DaskWorkflow`. This API only supports `"parquet"` at the moment. A similar engine for "csv" needs to be implemented.
**Describe the solution you'd like**
We need a `CSVDatasetEngine` implementation in `nvtabular/dask/io.py`.
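A rough usage sketch of what this looks like with the engine added in the patch above (the file paths and column names are made up for illustration):
```python
from nvtabular import DaskDataset

# Hypothetical CSV files; the engine can be passed explicitly or inferred
# from the ".csv" suffix of the first path.
paths = ["data/day_0.csv", "data/day_1.csv"]

dataset = DaskDataset(
    paths,
    engine="csv",
    part_size="1GB",                                  # target partition size
    names=["label", "x", "y", "id", "name-string"],   # needed when files have no header
)

# Materialize as a dask_cudf DataFrame, optionally selecting a subset of columns.
ddf = dataset.to_ddf(columns=["x", "y", "label"])
```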
| 2020-06-09T16:25:24 |
|
NVIDIA-Merlin/NVTabular | 105 | NVIDIA-Merlin__NVTabular-105 | [
"103"
] | 02f99659d94a7203f7ee9932dcbeffbbc428a84b | diff --git a/nvtabular/dask/io.py b/nvtabular/dask/io.py
--- a/nvtabular/dask/io.py
+++ b/nvtabular/dask/io.py
@@ -17,6 +17,7 @@
import warnings
from collections import defaultdict
from io import BytesIO
+from uuid import uuid4
import cudf
import cupy
@@ -33,10 +34,14 @@
from dask.utils import natural_sort_key, parse_bytes
from fsspec.core import get_fs_token_paths
from fsspec.utils import stringify_path
-from pyarrow.compat import guid
from nvtabular.io import GPUDatasetIterator, _shuffle_gdf, device_mem_size
+try:
+ import pyarrow.dataset as pa_ds
+except ImportError:
+ pa_ds = False
+
class WriterCache:
def __init__(self):
@@ -80,6 +85,12 @@ def clean_pw_cache():
return
+def guid():
+ """ Simple utility function to get random hex string
+ """
+ return uuid4().hex
+
+
def _write_metadata(meta_list):
# TODO: Write _metadata file here (need to collect metadata)
return meta_list
@@ -238,27 +249,48 @@ class ParquetDatasetEngine(DatasetEngine):
Dask-based version of cudf.read_parquet.
"""
- def __init__(self, *args, row_groups_per_part=None):
+ def __init__(self, *args, row_groups_per_part=None, legacy=False):
# TODO: Improve dask_cudf.read_parquet performance so that
# this class can be slimmed down.
super().__init__(*args)
- self._metadata, self._base = self.get_metadata()
- self._pieces = None
+
+ if pa_ds and not legacy:
+ # Use pyarrow.dataset API for "newer" pyarrow versions.
+ # Note that datasets API cannot handle a directory path
+ # within a list.
+ if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
+ self.paths = self.paths[0]
+ self._legacy = False
+ self._pieces = None
+ self._metadata, self._base = defaultdict(int), ""
+ path0 = None
+ ds = pa_ds.dataset(self.paths, format="parquet")
+ # TODO: Allow filtering while accessing fragments.
+ # This will require us to specify specific row-group indices
+ for file_frag in ds.get_fragments():
+ if path0 is None:
+ path0 = file_frag.path
+ for rg_frag in file_frag.get_row_group_fragments():
+ self._metadata[rg_frag.path] += len(list(rg_frag.row_groups))
+ else:
+ # Use pq.ParquetDataset API for <0.17.1
+ self._legacy = True
+ self._metadata, self._base = self.get_metadata()
+ self._pieces = None
+ if row_groups_per_part is None:
+ file_path = self._metadata.row_group(0).column(0).file_path
+ path0 = (
+ self.fs.sep.join([self._base, file_path])
+ if file_path != ""
+ else self._base # This is a single file
+ )
+
if row_groups_per_part is None:
- # TODO: Use `total_byte_size` metadata if/when we figure out how to
- # correct for apparent dict encoding of cat/string columns.
- file_path = self._metadata.row_group(0).column(0).file_path
- path0 = (
- self.fs.sep.join([self._base, file_path])
- if file_path != ""
- else self._base # This is a single file
- )
rg_byte_size_0 = (
cudf.io.read_parquet(path0, row_group=0).memory_usage(deep=True, index=True).sum()
)
- self.row_groups_per_part = int(self.part_size / rg_byte_size_0)
- else:
- self.row_groups_per_part = int(row_groups_per_part)
+ row_groups_per_part = self.part_size / rg_byte_size_0
+ self.row_groups_per_part = int(row_groups_per_part)
assert self.row_groups_per_part > 0
@property
@@ -304,13 +336,19 @@ def get_metadata(self):
@annotate("get_pieces", color="green", domain="nvt_python")
def _get_pieces(self, metadata, data_path):
+
# get the number of row groups per file
- file_row_groups = defaultdict(int)
- for rg in range(metadata.num_row_groups):
- fpath = metadata.row_group(rg).column(0).file_path
- if fpath is None:
- raise ValueError("metadata is missing file_path string.")
- file_row_groups[fpath] += 1
+ if self._legacy:
+ file_row_groups = defaultdict(int)
+ for rg in range(metadata.num_row_groups):
+ fpath = metadata.row_group(rg).column(0).file_path
+ if fpath is None:
+ raise ValueError("metadata is missing file_path string.")
+ file_row_groups[fpath] += 1
+ else:
+ # We already have this for pyarrow.datasets
+ file_row_groups = metadata
+
# create pieces from each file, limiting the number of row_groups in each piece
pieces = []
for filename, row_group_count in file_row_groups.items():
@@ -374,7 +412,6 @@ def __init__(self, *args, **kwargs):
super().__init__(*args)
self._meta = {}
self.csv_kwargs = kwargs
- self.names = self.csv_kwargs.get("names", None)
# CSV reader needs a list of files
# (Assume flat directory structure if this is a dir)
if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
@@ -389,7 +426,7 @@ def to_iter(self, columns=None):
self.paths,
engine="csv",
gpu_memory_frac=part_mem_fraction,
- names=self.names,
+ names=self.csv_kwargs.get("names", None),
columns=columns,
)
return iter(itr)
| [DEP] Handle future pyarrow-API deprecations
**Describe the bug**
Note that this is not really a "bug" **yet**, but will be in the future :)
Recent/future efforts in pyarrow will eventually cause problems for some of our dask-based I/O logic. The critical changes include:
- Deprecation of [pyarrow.compat](https://github.com/apache/arrow/blob/cfe114478d0c90b0822aec1c8c24635c3d35330e/python/pyarrow/compat.py#L24), which both dask and NVTabular use for "random" file naming.
- Replacement of the `ParquetDataset` API with a new [pyarrow.dataset](https://github.com/apache/arrow/blob/cfe114478d0c90b0822aec1c8c24635c3d35330e/python/pyarrow/dataset.py#L494) API. For now, much of the original functionality is still supported. However, it seems that the plan is to change the legacy API into a [thin dataset-API shim layer](https://github.com/apache/arrow/blob/cfe114478d0c90b0822aec1c8c24635c3d35330e/python/pyarrow/parquet.py#L1374) (and to ultimately stop supporting and deprecate `ParquetDataset`)
**TODO**:
- [ ] Stop using `pyarrow.compat` API (`guid` for file naming)
- [ ] Switch to `pyarrow.dataset` API for newer pyarrow versions (Allow `ParquetDataset` for `pyarrow<0.17`)
Note that these same steps need to be addressed in the upstream `dask`/`cudf` libraries, but those projects are waiting on a bit more functionality from the *new* API (`nvtabular` can make the changes sooner).
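A condensed sketch of what the two TODO items amount to, mirroring the approach taken in the patch above (illustrative rather than drop-in code):
```python
from uuid import uuid4

import pyarrow.parquet as pq

try:
    # New datasets API, available in pyarrow>=0.17
    import pyarrow.dataset as pa_ds
except ImportError:
    pa_ds = None


def guid():
    # Stand-in for pyarrow.compat.guid(): a random hex string for file naming.
    return uuid4().hex


def open_parquet_dataset(path):
    """Return a dataset handle using whichever API this pyarrow build provides."""
    if pa_ds is not None:
        return pa_ds.dataset(path, format="parquet")  # new API
    return pq.ParquetDataset(path)  # legacy API (pyarrow<0.17)
```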
| 2020-06-19T18:07:44 |
||
NVIDIA-Merlin/NVTabular | 110 | NVIDIA-Merlin__NVTabular-110 | [
"27"
] | 590c350054485492e613e511ebade027735ec5e1 | diff --git a/examples/criteo_benchmark.py b/examples/criteo_benchmark.py
--- a/examples/criteo_benchmark.py
+++ b/examples/criteo_benchmark.py
@@ -40,7 +40,7 @@ def parse_args():
from nvtabular import Workflow
from nvtabular.io import GPUDatasetIterator, device_mem_size
-from nvtabular.ops import Categorify, LogOp, Normalize, ZeroFill
+from nvtabular.ops import Categorify, LogOp, Normalize, ZeroFill, get_embedding_size
from nvtabular.torch_dataloader import DLCollator, DLDataLoader, FileItrDataset
@@ -144,9 +144,7 @@ def parse_args():
embeddings = [
x[1]
- for x in proc.df_ops["Categorify"].get_emb_sz(
- proc.stats["categories"], proc.columns_ctx["categorical"]["base"]
- )
+ for x in get_embedding_size(proc.stats["categories"], proc.columns_ctx["categorical"]["base"])
]
print("Creating Iterators for dataloader")
start = time()
diff --git a/nvtabular/ops.py b/nvtabular/ops.py
--- a/nvtabular/ops.py
+++ b/nvtabular/ops.py
@@ -1154,7 +1154,6 @@ class Categorify(DFOperator):
Replaces the transformed column with the original input
if set Yes
cat_names :
- embed_sz :
"""
default_in = CAT
@@ -1171,7 +1170,6 @@ def __init__(
preprocessing=True,
replace=True,
cat_names=None,
- embed_sz=None,
out_path=None,
split_out=None,
na_sentinel=None,
@@ -1186,7 +1184,6 @@ def __init__(
self.gpu_mem_util_limit = gpu_mem_util_limit
self.gpu_mem_trans_use = gpu_mem_trans_use
self.cat_names = cat_names if cat_names else []
- self.embed_sz = embed_sz if embed_sz else {}
self.out_path = out_path or "./"
self.split_out = split_out
self.na_sentinel = na_sentinel or 0
@@ -1242,30 +1239,32 @@ def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context={}):
new_gdf[new_col] = new_gdf[new_col].astype(self.dtype, copy=False)
return new_gdf
- def get_emb_sz(self, encoders, cat_names):
- # sorted key required to ensure same sort occurs for all values
- ret_list = [
- (n, self.def_emb_sz(encoders, n))
- for n in sorted(cat_names, key=lambda entry: entry.split("_")[0])
- ]
- return ret_list
- def emb_sz_rule(self, n_cat: int) -> int:
- return min(16, round(1.6 * n_cat ** 0.56))
+def get_embedding_order(cat_names):
+ """ Returns a consistent sorder order for categorical variables
- def def_emb_sz(self, classes, n, sz_dict=None):
- """Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`.
- """
- sz_dict = sz_dict if sz_dict else {}
- n_cat = classes[n]
- sz = sz_dict.get(n, int(self.emb_sz_rule(n_cat))) # rule of thumb
- self.embed_sz[n] = sz
- return n_cat, sz
-
- def get_embeddings(self, encoders, cat_names):
- embeddings = {}
- for col in sorted(cat_names):
- path = encoders[col]
- num_rows, _, _ = cudf.io.read_parquet_metadata(path)
- embeddings[col] = (num_rows, self.emb_sz_rule(num_rows))
- return embeddings
+ Parameters
+ -----------
+ cat_names : list of str
+ names of the categorical columns
+ """
+ return sorted(cat_names)
+
+
+def get_embedding_size(encoders, cat_names):
+ """ Returns a suggested size of embeddings based off cardinality of encoding categorical
+ variables
+ Parameters
+ -----------
+ encoders : dict
+ The encoding statistics of the categorical variables (ie. from workflow.stats["categories"])
+ cat_names : list of str
+ names of the categorical columns
+ """
+ # sorted key required to ensure same sort occurs for all values
+ ret_list = [(n, _emb_sz_rule(encoders[n])) for n in get_embedding_order(cat_names)]
+ return ret_list
+
+
+def _emb_sz_rule(n_cat: int) -> int:
+ return n_cat, int(min(16, round(1.6 * n_cat ** 0.56)))
diff --git a/nvtabular/torch_dataloader.py b/nvtabular/torch_dataloader.py
--- a/nvtabular/torch_dataloader.py
+++ b/nvtabular/torch_dataloader.py
@@ -21,6 +21,7 @@
from torch.utils.dlpack import from_dlpack
from nvtabular.io import GPUFileIterator
+from nvtabular.ops import get_embedding_order
class FileItrDataset(torch.utils.data.IterableDataset):
@@ -124,11 +125,7 @@ def create_tensors_plain(gdf, cat_cols=None, cont_cols=None, label_cols=None):
def combine_tensors(cats, conts, label):
- cats_list = (
- [cats[x] for x in sorted(cats.keys(), key=lambda entry: entry.split("_")[0])]
- if cats
- else None
- )
+ cats_list = [cats[x] for x in get_embedding_order(cats.keys())] if cats else None
conts_list = [conts[x] for x in sorted(conts.keys())] if conts else None
label_list = [label[x] for x in sorted(label.keys())] if label else None
@@ -152,12 +149,10 @@ def _one_df(
_to_tensor(gdf_label, torch.float32, label, to_cpu=False)
-def get_final_cols(preproc):
+def _get_final_cols(preproc):
if "cols" not in preproc.columns_ctx["final"]:
preproc.create_final_cols()
- cat_names = sorted(
- preproc.columns_ctx["final"]["cols"]["categorical"], key=lambda entry: entry.split("_")[0],
- )
+ cat_names = get_embedding_order(preproc.columns_ctx["final"]["cols"]["categorical"])
cont_names = sorted(preproc.columns_ctx["final"]["cols"]["continuous"])
label_name = sorted(preproc.columns_ctx["final"]["cols"]["label"])
return cat_names, cont_names, label_name
@@ -178,7 +173,7 @@ def process_one_df(
gdf = preproc.apply_ops(gdf)
if preproc:
- cat_names, cont_names, label_names = get_final_cols(preproc)
+ cat_names, cont_names, label_names = _get_final_cols(preproc)
_one_df(
gdf,
@@ -228,8 +223,7 @@ def __iter__(self):
for _ in range(self.num_chunks):
chunk = buff.get()
yield from TensorItr(
- chunk,
- batch_size=self.batch_size,
+ chunk, batch_size=self.batch_size,
)
def load_chunk(self, out):
| [FEA] Move or remove get_emb_sz method on Categorify op
<a href="https://github.com/alecgunny"><img src="https://avatars0.githubusercontent.com/u/14932242?v=4" align="left" width="96" height="96" hspace="10"></img></a> **Issue by [alecgunny](https://github.com/alecgunny)**
_Wednesday May 13, 2020 at 16:53 GMT_
_Originally opened as https://github.com/rapidsai/recsys/issues/121_
----
It's not entirely clear to me why `Categorify.get_emb_sz` and its associated helper methods are included as methods on the `Categorify` object, especially when nothing in the code actually uses attributes from the object itself (with the exception of `self.embed_sz`, which gets set rather than read):
```python
def get_emb_sz(self, encoders, cat_names):
work_in = {}
for key in encoders.keys():
work_in[key] = encoders[key] + 1
# sorted key required to ensure same sort occurs for all values
ret_list = [
(n, self.def_emb_sz(work_in, n))
for n in sorted(cat_names, key=lambda entry: entry.split("_")[0])
]
return ret_list
def emb_sz_rule(self, n_cat: int) -> int:
return min(16, round(1.6 * n_cat ** 0.56))
def def_emb_sz(self, classes, n, sz_dict=None):
"""Pick an embedding size for `n` depending on `classes` if not given in `sz_dict`.
"""
sz_dict = sz_dict if sz_dict else {}
n_cat = classes[n]
sz = sz_dict.get(n, int(self.emb_sz_rule(n_cat))) # rule of thumb
self.embed_sz[n] = sz
return n_cat, sz
```
While I'm personally of the opinion that it's not our library's job to provide rules of thumb for building deep learning models (just the data for whatever rules the user wants to use), if we're intent on having this function somewhere it seems like it would be better served as a standalone function of a single encoder, or as a property of the encoder itself:
```python
emb_szs = [get_embed_sz(proc.stats["categories"][column]) for column in proc.columns_ctx["categorical"]["base"]]
# or
emb_szs = [proc.stats["categories"][column].embed_sz[1] for column in proc.columns_ctx["categorical"]["base"]]
```
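For reference, a standalone version along those lines (roughly what the patch above ends up exposing as `get_embedding_size`; the `proc` object in the usage comment is hypothetical) could look like:
```python
def emb_sz_rule(n_cat: int) -> int:
    # Same rule of thumb as before, just not attached to the Categorify object.
    return int(min(16, round(1.6 * n_cat ** 0.56)))


def get_embedding_size(encoders, cat_names):
    # encoders: {column_name: cardinality}, e.g. workflow.stats["categories"]
    return [(name, (encoders[name], emb_sz_rule(encoders[name]))) for name in sorted(cat_names)]


# Hypothetical usage:
# sizes = get_embedding_size(proc.stats["categories"], proc.columns_ctx["categorical"]["base"])
# embeddings = [sz for _, sz in sizes]
```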
| Note that solutions like
```python
emb_szs = [get_embed_sz(proc.stats["categories"][column]) for column in proc.columns_ctx["categorical"]["base"]]
```
will break compatibility with `torch_dataloader` (and possibly other modules too), which sorts columns (see for example [this line](https://github.com/NVIDIA/NVTabular/blob/2e5f90bc47c96292484f210a39016918038a6911/nvtabular/torch_dataloader.py#L128) in `create_tensors`).
@benfred @alecgunny is there any agreed upon convention for column ordering? I'm wondering why `torch_dataloader` reorders columns, and whether this is done elsewhere, too.
If reordering is necessary, then I'll create a new issue to order the `proc.stats["categories"]` dictionary (among others), since as of CPython 3.6 (and more officially Python 3.7 in general) dictionaries are ordered.
Just realized that this same logic is present in `get_emb_sz` itself. Is this sorting convention necessary or is it something we can get rid of? Sorting according to this key seems odd and error-prone:
```
sorted(cat_names, key=lambda entry: entry.split("_")[0])
```
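To make the "error-prone" part concrete (with hypothetical column names): any two columns that share the same prefix before the first underscore get identical sort keys, so their relative order silently depends on the input order rather than on the full names:
```python
key = lambda entry: entry.split("_")[0]

sorted(["item_id", "item_category", "user_id"], key=key)
# -> ['item_id', 'item_category', 'user_id']   (ties on "item" keep input order)

sorted(["item_category", "item_id", "user_id"], key=key)
# -> ['item_category', 'item_id', 'user_id']   (same columns, different result)
```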
@rdipietro I think this is kind of a distinct issue, though certainly one worth noting. My point with the snippet
```
emb_szs = [get_embed_sz(proc.stats["categories"][column]) for column in proc.columns_ctx["categorical"]["base"]]
```
was simply to show what the call signature (i.e. `get_embed_sz(...)`) looks like. How you choose to iterate through it in a list is up to you (though obviously a good implementation will match how our dataloader orders things, which makes it all the more important to expose the means by which our dataloader does that ordering); it won't change the underlying function call.
While I'm thinking about this, I think it's probably worth discussing the choice of the syntax `embed_sz`. I know it matches what FastAI uses, but as we begin to expand beyond it as the target use case, I think abandoning some of its more idiosyncratic usage of abbreviations is worth doing for the sake of clarity. | 2020-06-23T23:44:58 |
|
NVIDIA-Merlin/NVTabular | 151 | NVIDIA-Merlin__NVTabular-151 | [
"150"
] | 438e96b4fb9078d050a7108380a8cbd7a13d1da7 | diff --git a/nvtabular/categorify.py b/nvtabular/categorify.py
--- a/nvtabular/categorify.py
+++ b/nvtabular/categorify.py
@@ -191,7 +191,9 @@ def _write_uniques(dfs, base_path, col, on_host):
# Make sure first category is Null
df = df.sort_values(col, na_position="first")
if not df[col]._column.has_nulls:
- df = cudf.DataFrame({col: _concat([cudf.Series([None]), df[col]], ignore_index)})
+ df = cudf.DataFrame(
+ {col: _concat([cudf.Series([None], dtype=df[col].dtype), df[col]], ignore_index)}
+ )
df.to_parquet(path, write_index=False, compression=None)
else:
df_null = cudf.DataFrame({col: [None]})
diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -17,6 +17,7 @@
import io
import json
import logging
+import math
import os
import queue
import threading
@@ -37,7 +38,6 @@
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.dataframe.io.parquet.utils import _analyze_paths
-from dask.dataframe.utils import group_split_dispatch
from dask.distributed import get_worker
from dask.utils import natural_sort_key, parse_bytes
from fsspec.core import get_fs_token_paths
@@ -617,30 +617,18 @@ def _write_metadata(meta_list):
@annotate("write_output_partition", color="green", domain="nvt_python")
def _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs):
gdf_size = len(gdf)
- if shuffle == "full":
- # Dont need a real sort if we are doing in memory later
- typ = np.min_scalar_type(out_files_per_proc * 2)
- ind = cp.random.choice(cp.arange(out_files_per_proc, dtype=typ), gdf_size)
- result = group_split_dispatch(gdf, ind, out_files_per_proc, ignore_index=True)
- del ind
- del gdf
- # Write each split to a separate file
- for s, df in result.items():
- prefix = fs.sep.join([processed_path, "split." + str(s)])
- pw = get_cache().get_pq_writer(prefix, s, mem=True)
- pw.write_table(df)
- else:
+ out_files_per_proc = out_files_per_proc or 1
+ if shuffle and shuffle != "full":
# We should do a real sort here
- if shuffle == "partial":
- gdf = _shuffle_gdf(gdf, gdf_size=gdf_size)
- splits = list(range(0, gdf_size, int(gdf_size / out_files_per_proc)))
- if splits[-1] < gdf_size:
- splits.append(gdf_size)
- # Write each split to a separate file
- for s in range(0, len(splits) - 1):
- prefix = fs.sep.join([processed_path, "split." + str(s)])
- pw = get_cache().get_pq_writer(prefix, s, mem=False)
- pw.write_table(gdf.iloc[splits[s] : splits[s + 1]])
+ gdf = _shuffle_gdf(gdf, gdf_size=gdf_size)
+ split_size = math.ceil(gdf_size / out_files_per_proc)
+ ind = cp.ones(gdf_size).cumsum() // split_size
+ del ind
+ for s in range(out_files_per_proc):
+ prefix = fs.sep.join([processed_path, "split." + str(s)])
+ pw = get_cache().get_pq_writer(prefix, s, mem=(shuffle == "full"))
+ pw.write_table(gdf.iloc[s * split_size : (s + 1) * split_size])
+
return gdf_size # TODO: Make this metadata
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -705,10 +705,17 @@ def apply(
record_stats : boolean
record the stats in file or not. Only available
for apply_offline=True
- shuffle : boolean
- shuffles the data or not
+ shuffle : {"full", "partial", None}
+ Whether to shuffle output dataset. "partial" means
+ each worker will randomly shuffle data into a number
+ (`out_files_per_proc`) of different output files as the data
+ is processed. The output files are distinctly mapped to
+ each worker process. "full" means the workers will perform the
+ "partial" shuffle into BytesIO files, and then perform
+ a full shuffle of each in-memory file before writing to
+ disk. A "true" full shuffle is not yet implemented.
output_path : string
- path to export stats
+ path to output data
out_files_per_proc : integer
number of files to create (per process) after
shuffling the data
@@ -748,7 +755,7 @@ def apply(
if shuffle:
if isinstance(shuffle, str):
raise ValueError("TODO: Align shuffling/writing API for online/offline.")
- shuffler = nvt_io.Shuffler(output_path, num_out_files=num_out_files)
+ shuffler = nvt_io.Shuffler(output_path, num_out_files=out_files_per_proc)
if hugectr_gen_output:
self.cal_col_names = False
if hugectr_output_format == "binary":
@@ -823,11 +830,9 @@ def update_stats(
def to_dataset(self, output_path, shuffle=None, out_files_per_proc=None):
ddf = self.get_ddf()
- out_files_per_proc = out_files_per_proc or 1
fs = get_fs_token_paths(output_path)[0]
fs.mkdirs(output_path, exist_ok=True)
-
- if shuffle:
+ if shuffle or out_files_per_proc:
name = "write-processed"
write_name = name + tokenize(ddf, shuffle, out_files_per_proc)
task_list = []
| diff --git a/tests/unit/test_workflow.py b/tests/unit/test_workflow.py
--- a/tests/unit/test_workflow.py
+++ b/tests/unit/test_workflow.py
@@ -16,9 +16,11 @@
import glob
import math
+import os
import cudf
import numpy as np
+import pandas as pd
import pytest
from cudf.tests.utils import assert_eq
from pandas.api.types import is_integer_dtype
@@ -90,7 +92,7 @@ def get_norms(tar: cudf.Series):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac,
+ glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -166,7 +168,7 @@ def get_norms(tar: cudf.Series):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac,
+ glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -250,7 +252,7 @@ def get_norms(tar: cudf.Series):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac,
+ glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -261,3 +263,27 @@ def get_norms(tar: cudf.Series):
num_rows, num_row_groups, col_names = cudf.io.read_parquet_metadata(str(tmpdir) + "/_metadata")
assert num_rows == len(df_pp)
+
+
[email protected]("shuffle", ["full", "partial", None])
+def test_output_count(tmpdir, shuffle):
+ out_files_per_proc = 2
+ out_path = os.path.join(tmpdir, "processed")
+ path = os.path.join(tmpdir, "simple.parquet")
+
+ df = pd.DataFrame({"a": np.arange(25)})
+ df.to_parquet(path, row_group_size=5, engine="pyarrow")
+
+ columns = ["a"]
+ dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
+ processor = nvt.Workflow(cat_names=[], cont_names=columns, label_name=[])
+
+ processor.add_preprocess(ops.Normalize())
+ processor.finalize()
+
+ processor.update_stats(
+ dataset, output_path=out_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc
+ )
+ result = glob.glob(os.path.join(out_path, "*"))
+
+ assert len(result) == out_files_per_proc
| [BUG] Output-file count is incorrect for shuffle="partial"
**Describe the bug**
As originally discovered by @rnyak, there seems to be an extra output file written for `shuffle="partial"`. That is, when using a single process with `out_files_per_proc=2`, there are **3** output files written.
**Steps/Code to reproduce bug**
```python
import nvtabular as nvt
import pandas as pd
import numpy as np
!rm -rf test.parquet processed
!mkdir processed
path = "test.parquet"
out_path = "processed/"
out_files_per_proc = 2
size = 25
df = pd.DataFrame({"a": np.arange(size)})
df.to_parquet(path, row_group_size=5, engine="pyarrow")
columns = ["a"]
dataset = nvt.Dataset(
path, engine="parquet", row_groups_per_part=1
)
processor = nvt.Workflow(
cat_names=[], cont_names=columns, label_name=[]
)
processor.add_preprocess(nvt.ops.Normalize())
processor.finalize()
processor.update_stats(
dataset,
output_path=out_path,
shuffle="partial",
out_files_per_proc=out_files_per_proc,
)
!ls processed
```
**Output has 3 files (should be 2)**:
```
split.0.459d88bc3f52405bae6fd572570fd452.parquet
split.1.ff7fd6b1f7d44504b24351afd8b43d4c.parquet
split.2.f6cfed77c5a848e998c60b2d2613a4dd.parquet
```
**Expected behavior**
The `out_files_per_proc` parameter should result in the expected number of output files.
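For what it's worth, the extra file comes straight out of the split arithmetic: with the numbers from the repro above (25 rows, 2 files per process) the old range-based boundaries yield three slices, while a ceil-based split size (what the patch above switches to) yields exactly two. A small sketch:
```python
import math

gdf_size, out_files_per_proc = 25, 2

# Old logic: step = int(25 / 2) = 12 -> boundaries [0, 12, 24, 25] -> 3 slices -> 3 files
splits = list(range(0, gdf_size, int(gdf_size / out_files_per_proc)))
if splits[-1] < gdf_size:
    splits.append(gdf_size)
print(len(splits) - 1)  # 3

# Fixed logic: split_size = ceil(25 / 2) = 13 -> one slice per requested output file
split_size = math.ceil(gdf_size / out_files_per_proc)
bounds = [(s * split_size, min((s + 1) * split_size, gdf_size)) for s in range(out_files_per_proc)]
print(bounds)  # [(0, 13), (13, 25)] -> 2 files
```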
| 2020-07-14T19:50:20 |
|
NVIDIA-Merlin/NVTabular | 155 | NVIDIA-Merlin__NVTabular-155 | [
"59"
] | 6a211fc9abdb8a4c6062c5bac49fb0ea6cf19eef | diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -39,7 +39,9 @@
from dask.base import tokenize
from dask.dataframe.core import new_dd_object
from dask.dataframe.io.parquet.utils import _analyze_paths
+from dask.delayed import Delayed
from dask.distributed import get_worker
+from dask.highlevelgraph import HighLevelGraph
from dask.utils import natural_sort_key, parse_bytes
from fsspec.core import get_fs_token_paths
from fsspec.utils import stringify_path
@@ -612,17 +614,21 @@ def clean_pw_cache():
return
+def close_cached_pw(fs):
+ md_dict = {}
+ with get_cache() as cache:
+ for path, (pw, bio) in cache.pq_writer_cache.items():
+ fn = bio.split(fs.sep)[-1]
+ md_dict[fn] = pw.close(metadata_file_path=fn)
+ return md_dict
+
+
def guid():
""" Simple utility function to get random hex string
"""
return uuid4().hex
-def _write_metadata(meta_list):
- # TODO: Write _metadata file here (need to collect metadata)
- return meta_list
-
-
@annotate("write_output_partition", color="green", domain="nvt_python")
def _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs):
gdf_size = len(gdf)
@@ -639,13 +645,42 @@ def _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs
pw = cache.get_pq_writer(prefix, s, mem=(shuffle == "full"))
pw.write_table(gdf.iloc[s * split_size : (s + 1) * split_size])
- return gdf_size # TODO: Make this metadata
+ return gdf_size
+
+
+def _to_parquet_dataset(ddf, fs, output_path, shuffle, out_files_per_proc):
+ name = "write-processed"
+ write_name = name + tokenize(ddf, shuffle, out_files_per_proc)
+ task_list = []
+ dsk = {}
+ for idx in range(ddf.npartitions):
+ key = (write_name, idx)
+ dsk[key] = (
+ _write_output_partition,
+ (ddf._name, idx),
+ output_path,
+ shuffle,
+ out_files_per_proc,
+ fs,
+ )
+ task_list.append(key)
+ dsk[name] = (lambda x: x, task_list)
+ graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
+ return Delayed(name, graph)
+
+
+def _write_pq_metadata_file(md_list, fs, path):
+ if md_list:
+ metadata_path = fs.sep.join([path, "_metadata"])
+ _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
+ with fs.open(metadata_path, "wb") as fil:
+ _meta.tofile(fil)
+ return
@annotate("worker_shuffle", color="green", domain="nvt_python")
def _worker_shuffle(processed_path, fs):
- paths = []
-
+ metadata_dict = {}
with get_cache() as cache:
for path, (pw, bio) in cache.pq_writer_cache.items():
pw.close()
@@ -656,9 +691,10 @@ def _worker_shuffle(processed_path, fs):
gdf = _shuffle_gdf(gdf)
rel_path = "shuffled.%s.parquet" % (guid())
full_path = fs.sep.join([processed_path, rel_path])
- gdf.to_parquet(full_path, compression=None, index=False)
- paths.append(full_path)
- return paths
+ metadata_dict[rel_path] = gdf.to_parquet(
+ full_path, compression=None, index=False, metadata_file_path=rel_path
+ )
+ return metadata_dict
class Dataset:
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -22,9 +22,7 @@
import dask
import yaml
from cudf._lib.nvtx import annotate
-from dask.base import tokenize
-from dask.delayed import Delayed
-from dask.highlevelgraph import HighLevelGraph
+from dask.utils import natural_sort_key
from fsspec.core import get_fs_token_paths
import nvtabular.io as nvt_io
@@ -833,48 +831,58 @@ def to_dataset(self, output_path, shuffle=None, out_files_per_proc=None):
fs = get_fs_token_paths(output_path)[0]
fs.mkdirs(output_path, exist_ok=True)
if shuffle or out_files_per_proc:
- name = "write-processed"
- write_name = name + tokenize(ddf, shuffle, out_files_per_proc)
- task_list = []
- dsk = {}
- for idx in range(ddf.npartitions):
- key = (write_name, idx)
- dsk[key] = (
- nvt_io._write_output_partition,
- (ddf._name, idx),
- output_path,
- shuffle,
- out_files_per_proc,
- fs,
- )
- task_list.append(key)
- dsk[name] = (nvt_io._write_metadata, task_list)
- graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])
- out = Delayed(name, graph)
- # Would also be nice to clean the categorical
+ # Construct graph for Dask-based dataset write
+ out = nvt_io._to_parquet_dataset(ddf, fs, output_path, shuffle, out_files_per_proc)
+
+ # Would be nice to clean the categorical
# cache before the write (TODO)
- # Trigger the Dask-based write and do a
- # full (per-worker) shuffle if requested
+ # Trigger write execution
if self.client:
self.client.cancel(self.ddf_base_dataset)
self.ddf_base_dataset = None
out = self.client.compute(out).result()
- if shuffle == "full":
- self.client.cancel(self.ddf)
- self.ddf = None
- self.client.run(nvt_io._worker_shuffle, output_path, fs)
- self.client.run(nvt_io.clean_pw_cache)
else:
self.ddf_base_dataset = None
out = dask.compute(out, scheduler="synchronous")[0]
- if shuffle == "full":
+
+ # Deal with "full" (per-worker) shuffle here.
+ if shuffle == "full":
+ if self.client:
+ self.client.cancel(self.ddf)
self.ddf = None
- nvt_io._worker_shuffle(output_path, fs)
+ worker_md = self.client.run(nvt_io._worker_shuffle, output_path, fs)
+ worker_md = list(collections.ChainMap(*worker_md.values()).items())
+ else:
+ self.ddf = None
+ worker_md = nvt_io._worker_shuffle(output_path, fs)
+ worker_md = list(worker_md.items())
+
+ else:
+ # Collect parquet metadata while closing
+ # ParquetWriter object(s)
+ if self.client:
+ worker_md = self.client.run(nvt_io.close_cached_pw, fs)
+ worker_md = list(collections.ChainMap(*worker_md.values()).items())
+ else:
+ worker_md = nvt_io.close_cached_pw(fs)
+ worker_md = list(worker_md.items())
+
+ # Sort metadata by file name and convert list of
+ # tuples to a list of metadata byte-blobs
+ md_list = [m[1] for m in sorted(worker_md, key=lambda x: natural_sort_key(x[0]))]
+
+ # Aggregate metadata and write _metadata file
+ nvt_io._write_pq_metadata_file(md_list, fs, output_path)
+
+ # Close ParquetWriter Objects
+ if self.client:
+ self.client.run(nvt_io.clean_pw_cache)
+ else:
nvt_io.clean_pw_cache()
- return out
+ return
# Default (shuffle=False): Just use dask_cudf.to_parquet
fut = ddf.to_parquet(output_path, compression=None, write_index=False, compute=False)
| diff --git a/tests/unit/test_workflow.py b/tests/unit/test_workflow.py
--- a/tests/unit/test_workflow.py
+++ b/tests/unit/test_workflow.py
@@ -266,24 +266,38 @@ def get_norms(tar: cudf.Series):
@pytest.mark.parametrize("shuffle", ["full", "partial", None])
-def test_output_count(tmpdir, shuffle):
[email protected]("use_client", [True, False])
+def test_parquet_output(client, use_client, tmpdir, shuffle):
out_files_per_proc = 2
- out_path = os.path.join(tmpdir, "processed")
- path = os.path.join(tmpdir, "simple.parquet")
+ n_workers = len(client.cluster.workers) if use_client else 1
+ out_path = str(tmpdir.mkdir("processed"))
+ path = str(tmpdir.join("simple.parquet"))
- df = pd.DataFrame({"a": np.arange(25)})
- df.to_parquet(path, row_group_size=5, engine="pyarrow")
+ size = 25
+ row_group_size = 5
+ df = pd.DataFrame({"a": np.arange(size)})
+ df.to_parquet(path, row_group_size=row_group_size, engine="pyarrow")
columns = ["a"]
dataset = nvt.Dataset(path, engine="parquet", row_groups_per_part=1)
- processor = nvt.Workflow(cat_names=[], cont_names=columns, label_name=[])
-
+ processor = nvt.Workflow(
+ cat_names=[], cont_names=columns, label_name=[], client=client if use_client else None
+ )
processor.add_preprocess(ops.Normalize())
processor.finalize()
-
processor.update_stats(
dataset, output_path=out_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc
)
- result = glob.glob(os.path.join(out_path, "*"))
- assert len(result) == out_files_per_proc
+ # Check that the number of output files is correct
+ result = glob.glob(os.path.join(out_path, "*.parquet"))
+ assert len(result) == out_files_per_proc * n_workers
+
+ # Make sure _metadata exists
+ meta_path = os.path.join(out_path, "_metadata")
+ assert os.path.exists(meta_path)
+
+ # Make sure _metadata makes sense
+ _metadata = cudf.io.read_parquet_metadata(meta_path)
+ assert _metadata[0] == size
+ assert _metadata[2] == columns
| [FEA] Write shared "_metadata" file for output parquet data in DaskWorkflow
<a href="https://github.com/rjzamora"><img src="https://avatars0.githubusercontent.com/u/20461013?v=4" align="left" width="96" height="96" hspace="10"></img></a> **Issue by [rjzamora](https://github.com/rjzamora)**
_Thursday May 28, 2020 at 16:43 GMT_
_Originally opened as https://github.com/rapidsai/recsys/issues/179_
----
**Is your feature request related to a problem? Please describe.**
The `DaskWorkflow` implementation introduced in #136 does not currently write a shared parquet `"_metadata"` file when the `to_dataset` API is used. This is because of the limitation discussed in [cudf#5284](https://github.com/rapidsai/cudf/issues/5284). Once that issue is resolved upstream, the `DaskWorkflow` code should be modified to write the shared file (having this file is a significant performance boost if/when dask is used to read the dataset back in).
**Describe the solution you'd like**
Once the upstream changes are made, metadata file creation will look very similar to [this existing cudf method](https://github.com/rapidsai/cudf/blob/56769d4755091e3fe892fcdd020bd1e2a9a19cfe/python/dask_cudf/dask_cudf/io/parquet.py#L130).
**Describe alternatives you've considered**
The alternative is to skip `"_metadata"` creation, or to create the shared file with pyarrow after the fact (requiring a follow-up Python process to parse all the footer metadata after it is written).
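For reference, a condensed sketch of the first option (roughly what the accompanying patch does once the cudf writers can hand back footer metadata; `fs` is an fsspec filesystem and `md_list` the per-file metadata blobs, both assumptions for illustration):
```python
import cudf


def write_shared_metadata(md_list, fs, out_dir):
    """Merge per-file parquet footer metadata blobs and write a single _metadata file.

    md_list is assumed to hold the blobs returned by cudf's parquet writers, e.g.
    ParquetWriter.close(metadata_file_path=...) or
    DataFrame.to_parquet(..., metadata_file_path=...), ordered by file name.
    """
    if not md_list:
        return
    merged = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
    with fs.open(fs.sep.join([out_dir, "_metadata"]), "wb") as f:
        merged.tofile(f)
```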
| 2020-07-16T17:44:13 |
|
NVIDIA-Merlin/NVTabular | 158 | NVIDIA-Merlin__NVTabular-158 | [
"51"
] | a58b75896ee14f5ff706645d859b2ec24b66fc5c | diff --git a/nvtabular/ds_writer.py b/nvtabular/ds_writer.py
deleted file mode 100644
--- a/nvtabular/ds_writer.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#
-# Copyright (c) 2020, NVIDIA CORPORATION.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import glob
-import os
-
-import cudf
-import cupy as cp
-import numpy as np
-import pyarrow.parquet as pq
-
-
-class FileIterator:
- def __init__(self, path, nfiles, shuffle=True, **kwargs):
- self.path = path
- self.nfiles = nfiles
- self.shuffle = shuffle
- self.ind = 0
- self.inds = np.arange(self.nfiles)
- if self.shuffle:
- np.random.shuffle(self.inds)
-
- def __iter__(self):
- self.ind = 0
- self.inds = np.arange(self.nfiles)
- if self.shuffle:
- np.random.shuffle(self.inds)
- return self
-
- def __next__(self):
- if self.ind >= self.nfiles:
- raise StopIteration
- self.ind += 1
- # if self.name, return that naming convention.
- return "%s/ds_part.%d.parquet" % (self.path, self.ind - 1)
-
-
-class DatasetWriter:
- def __init__(self, path, nfiles=1, **kwargs):
- self.path = path
- self.nfiles = nfiles
- self.writers = {fn: None for fn in FileIterator(path, nfiles)}
- self.shared_meta_path = str(path) + "/_metadata"
- self.metadata = None
- self.new_metadata = {fn: [] for fn in FileIterator(path, nfiles)}
-
- # Check for _metadata
- metafile = glob.glob(self.shared_meta_path)
- if metafile:
- self.metadata = pq.ParquetDataset(metafile[0]).metadata
-
- def write(self, gdf, shuffle=True):
-
- # Shuffle the dataframe
- gdf_size = len(gdf)
- if shuffle:
- sort_key = "__sort_index__"
- arr = cp.arange(gdf_size)
- cp.random.shuffle(arr)
- gdf[sort_key] = cudf.Series(arr)
- gdf = gdf.sort_values(sort_key).drop(columns=[sort_key])
-
- # Write to
- chunk_size = int(gdf_size / self.nfiles)
- for i, fn in enumerate(FileIterator(self.path, self.nfiles)):
- s1 = i * chunk_size
- s2 = (i + 1) * chunk_size
- if i == (self.nfiles - 1):
- s2 = gdf_size
- chunk = gdf[s1:s2]
- pa_table = chunk.to_arrow()
- if self.writers[fn] is None:
- self.writers[fn] = pq.ParquetWriter(
- fn, pa_table.schema, metadata_collector=self.new_metadata[fn],
- )
- self.writers[fn].write_table(pa_table)
-
- def write_metadata(self):
- self.close_writers() # Writers must be closed to get metadata
- fns = [fn for fn in FileIterator(self.path, self.nfiles, shuffle=False)]
- if self.metadata is not None:
- _meta = self.metadata
- i_start = 0
- else:
- _meta = self.new_metadata[fns[0]]
- if _meta:
- _meta = _meta[0]
- i_start = 1
- for i in range(i_start, len(fns)):
- _meta_new = self.new_metadata[fns[i]]
- if _meta_new:
- _meta.append_row_groups(_meta_new[0])
- with open(self.shared_meta_path, "wb") as fil:
- _meta.write_metadata_file(fil)
- self.metadata = _meta
- return
-
- def close_writers(self):
- for fn, writer in self.writers.items():
- if writer is not None:
- writer.close()
- # Set row-group file paths
- self.new_metadata[fn][0].set_file_path(os.path.basename(fn))
- writer = None
-
- def __del__(self):
- self.close_writers()
diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -14,6 +14,7 @@
# limitations under the License.
#
+import collections
import contextlib
import functools
import io
@@ -55,6 +56,80 @@
LOG = logging.getLogger("nvtabular")
+
+#
+# Cache-Specific Code
+#
+
+
+class WriterCache:
+ def __init__(self):
+ self.pq_writer_cache = {}
+
+ def __del__(self):
+ for path, (pw, fpath) in self.pq_writer_cache.items():
+ pw.close()
+
+ def get_pq_writer(self, prefix, s, mem):
+ pw, fil = self.pq_writer_cache.get(prefix, (None, None))
+ if pw is None:
+ if mem:
+ fil = BytesIO()
+ pw = pwriter(fil, compression=None)
+ self.pq_writer_cache[prefix] = (pw, fil)
+ else:
+ outfile_id = guid() + ".parquet"
+ full_path = ".".join([prefix, outfile_id])
+ pw = pwriter(full_path, compression=None)
+ self.pq_writer_cache[prefix] = (pw, full_path)
+ return pw
+
+
[email protected]
+def get_cache():
+ with _DEFAULT_CACHE_LOCK:
+ yield _get_cache()
+
+
+def _get_cache():
+ try:
+ worker = get_worker()
+ except ValueError:
+ # There is no dask.distributed worker.
+ # Assume client/worker are same process
+ global _DEFAULT_CACHE
+ if _DEFAULT_CACHE is None:
+ _DEFAULT_CACHE = WriterCache()
+ return _DEFAULT_CACHE
+ if not hasattr(worker, "pw_cache"):
+ worker.pw_cache = WriterCache()
+ return worker.pw_cache
+
+
+def clean_pw_cache():
+ with _DEFAULT_CACHE_LOCK:
+ try:
+ worker = get_worker()
+ except ValueError:
+ global _DEFAULT_CACHE
+ if _DEFAULT_CACHE is not None:
+ del _DEFAULT_CACHE
+ _DEFAULT_CACHE = None
+ return
+ if hasattr(worker, "pw_cache"):
+ del worker.pw_cache
+ return
+
+
+def close_cached_pw(fs):
+ md_dict = {}
+ with get_cache() as cache:
+ for path, (pw, bio) in cache.pq_writer_cache.items():
+ fn = bio.split(fs.sep)[-1]
+ md_dict[fn] = pw.close(metadata_file_path=fn)
+ return md_dict
+
+
#
# Helper Function definitions
#
@@ -82,6 +157,15 @@ def _get_read_engine(engine, file_path, **kwargs):
raise ValueError("Unrecognized read engine.")
+def _shuffle_gdf(gdf, gdf_size=None):
+ """ Shuffles a cudf dataframe, returning a new dataframe with randomly
+ ordered rows """
+ gdf_size = gdf_size or len(gdf)
+ arr = cp.arange(gdf_size)
+ cp.random.shuffle(arr)
+ return gdf.iloc[arr]
+
+
#
# GPUFileReader Base Class
#
@@ -315,13 +399,19 @@ def _set_dtypes(self, chunk):
chunk[col] = chunk[col].astype(dtype)
-def _shuffle_gdf(gdf, gdf_size=None):
- """ Shuffles a cudf dataframe, returning a new dataframe with randomly
- ordered rows """
- gdf_size = gdf_size or len(gdf)
- arr = cp.arange(gdf_size)
- cp.random.shuffle(arr)
- return gdf.iloc[arr]
+def writer_factory(output_format, output_path, out_files_per_proc, shuffle):
+ if output_format is None:
+ return None
+
+ if output_format == "parquet":
+ writer_cls = ParquetWriter
+ elif output_format == "hugectr":
+ writer_cls = HugeCTRWriter
+ else:
+ raise ValueError("Output format not yet supported.")
+
+ fs = get_fs_token_paths(output_path)[0]
+ return writer_cls(output_path, num_out_files=out_files_per_proc, shuffle=shuffle, fs=fs)
class Writer:
@@ -331,32 +421,39 @@ def __init__(self):
def add_data(self, gdf):
raise NotImplementedError()
- def close(self):
- pass
-
+ def package_general_metadata(self):
+ raise NotImplementedError()
-class Shuffler(Writer):
- def __init__(
- self, out_dir, num_out_files=30, num_threads=4, cats=None, conts=None, labels=None
- ):
- self.writer = ParquetWriter(out_dir, num_out_files, num_threads, cats, conts, labels)
+ @classmethod
+ def write_general_metadata(cls, data, fs, out_dir):
+ raise NotImplementedError()
- def add_data(self, gdf):
- self.writer.add_data(_shuffle_gdf(gdf))
+ @classmethod
+ def write_special_metadata(cls, data, fs, out_dir):
+ raise NotImplementedError()
def close(self):
- self.writer.close()
+ pass
class ThreadedWriter(Writer):
def __init__(
- self, out_dir, num_out_files=30, num_threads=4, cats=None, conts=None, labels=None
+ self,
+ out_dir,
+ num_out_files=30,
+ num_threads=4,
+ cats=None,
+ conts=None,
+ labels=None,
+ shuffle=None,
+ fs=None,
):
# set variables
self.out_dir = out_dir
self.cats = cats
self.conts = conts
self.labels = labels
+ self.shuffle = shuffle
self.column_names = None
if labels and conts:
self.column_names = labels + conts
@@ -367,7 +464,11 @@ def __init__(
self.num_out_files = num_out_files
self.num_samples = [0] * num_out_files
- self.data_files = None
+ self.data_paths = None
+ self.need_cal_col_names = True
+
+ # Resolve file system
+ self.fs = fs or get_fs_token_paths(str(out_dir))[0]
# create thread queue and locks
self.queue = queue.Queue(num_threads)
@@ -392,6 +493,11 @@ def _write_thread(self):
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, gdf):
+
+ # Shuffle if necessary
+ if self.shuffle:
+ gdf = _shuffle_gdf(gdf)
+
# Populate columns idxs
if not self.col_idx:
for i, x in enumerate(gdf.columns.values):
@@ -413,17 +519,59 @@ def add_data(self, gdf):
# wait for all writes to finish before exitting (so that we aren't using memory)
self.queue.join()
- def _write_metadata(self):
- return
+ def package_general_metadata(self):
+ data = {}
+ if self.cats is None:
+ return data
+ data["data_paths"] = self.data_paths
+ data["file_stats"] = []
+ for i, path in enumerate(self.data_paths):
+ fn = path.split(self.fs.sep)[-1]
+ data["file_stats"].append({"file_name": fn, "num_rows": self.num_samples[i]})
+ # cats
+ data["cats"] = []
+ for c in self.cats:
+ data["cats"].append({"col_name": c, "index": self.col_idx[c]})
+ # conts
+ data["conts"] = []
+ for c in self.conts:
+ data["conts"].append({"col_name": c, "index": self.col_idx[c]})
+ # labels
+ data["labels"] = []
+ for c in self.labels:
+ data["labels"].append({"col_name": c, "index": self.col_idx[c]})
+
+ return data
+
+ @classmethod
+ def write_general_metadata(cls, data, fs, out_dir):
+ if not data:
+ return
+ data_paths = data.pop("data_paths", [])
+ num_out_files = len(data_paths)
- def _write_filelist(self):
- file_list_writer = open(os.path.join(self.out_dir, "file_list.txt"), "w")
- file_list_writer.write(str(self.num_out_files) + "\n")
- for f in self.data_files:
+ # Write file_list
+ file_list_writer = fs.open(fs.sep.join([out_dir, "file_list.txt"]), "w")
+ file_list_writer.write(str(num_out_files) + "\n")
+ for f in data_paths:
file_list_writer.write(f + "\n")
file_list_writer.close()
- def close(self):
+ # Write metadata json
+ metadata_writer = fs.open(fs.sep.join([out_dir, "metadata.json"]), "w")
+ json.dump(data, metadata_writer)
+ metadata_writer.close()
+
+ @classmethod
+ def write_special_metadata(cls, data, fs, out_dir):
+ pass
+
+ def _close_writers(self):
+ for writer in self.data_writers:
+ writer.close()
+ return None
+
+ def close(self, write_metadata=True):
# wake up all the worker threads and signal for them to exit
for _ in range(self.num_threads):
self.queue.put(self._eod)
@@ -431,21 +579,25 @@ def close(self):
# wait for pending writes to finish
self.queue.join()
- self._write_filelist()
- self._write_metadata()
-
- # Close writers
- for writer in self.data_writers:
- writer.close()
+ # Close writers and collect various metadata
+ _gen_meta = self.package_general_metadata()
+ _special_meta = self._close_writers()
+ return _gen_meta, _special_meta
class ParquetWriter(ThreadedWriter):
- def __init__(
- self, out_dir, num_out_files=30, num_threads=4, cats=None, conts=None, labels=None
- ):
- super().__init__(out_dir, num_out_files, num_threads, cats, conts, labels)
- self.data_files = [os.path.join(out_dir, f"{i}.parquet") for i in range(num_out_files)]
- self.data_writers = [pwriter(f, compression=None) for f in self.data_files]
+ def __init__(self, out_dir, use_guid=False, **kwargs):
+ super().__init__(out_dir, **kwargs)
+ self.data_paths = []
+ self.data_writers = []
+ for i in range(self.num_out_files):
+ if use_guid:
+ fn = f"{i}.{guid()}.parquet"
+ else:
+ fn = f"{i}.parquet"
+ path = os.path.join(out_dir, fn)
+ self.data_paths.append(path)
+ self.data_writers.append(pwriter(path, compression=None))
def _write_thread(self):
while True:
@@ -459,38 +611,28 @@ def _write_thread(self):
finally:
self.queue.task_done()
- def _write_metadata(self):
- if self.cats is None:
- return
- metadata_writer = open(os.path.join(self.out_dir, "metadata.json"), "w")
- data = {}
- data["file_stats"] = []
- for i in range(len(self.data_files)):
- data["file_stats"].append({"file_name": f"{i}.data", "num_rows": self.num_samples[i]})
- # cats
- data["cats"] = []
- for c in self.cats:
- data["cats"].append({"col_name": c, "index": self.col_idx[c]})
- # conts
- data["conts"] = []
- for c in self.conts:
- data["conts"].append({"col_name": c, "index": self.col_idx[c]})
- # labels
- data["labels"] = []
- for c in self.labels:
- data["labels"].append({"col_name": c, "index": self.col_idx[c]})
+ @classmethod
+ def write_special_metadata(cls, md, fs, out_dir):
+ # Sort metadata by file name and convert list of
+ # tuples to a list of metadata byte-blobs
+ md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
- json.dump(data, metadata_writer)
- metadata_writer.close()
+ # Aggregate metadata and write _metadata file
+ _write_pq_metadata_file(md_list, fs, out_dir)
+
+ def _close_writers(self):
+ md_dict = {}
+ for writer, path in zip(self.data_writers, self.data_paths):
+ fn = path.split(self.fs.sep)[-1]
+ md_dict[fn] = writer.close(metadata_file_path=fn)
+ return md_dict
class HugeCTRWriter(ThreadedWriter):
- def __init__(
- self, out_dir, num_out_files=30, num_threads=4, cats=None, conts=None, labels=None
- ):
- super().__init__(out_dir, num_out_files, num_threads, cats, conts, labels)
- self.data_files = [os.path.join(out_dir, f"{i}.data") for i in range(num_out_files)]
- self.data_writers = [open(f, "ab") for f in self.data_files]
+ def __init__(self, out_dir, **kwargs):
+ super().__init__(out_dir, **kwargs)
+ self.data_paths = [os.path.join(out_dir, f"{i}.data") for i in range(self.num_out_files)]
+ self.data_writers = [open(f, "ab") for f in self.data_paths]
def _write_thread(self):
while True:
@@ -509,32 +651,33 @@ def _write_thread(self):
finally:
self.queue.task_done()
- def _write_metadata(self):
- if self.cats is None:
- return
- for i in range(len(self.data_writers)):
- self.data_writers[i].seek(0)
- # error_check (0: no error check; 1: check_num)
- # num of samples in this file
- # Dimension of the labels
- # Dimension of the features
- # slot_num for each embedding
- # reserved for future use
- header = np.array(
- [
- 0,
- self.num_samples[i],
- len(self.labels),
- len(self.conts),
- len(self.cats),
- 0,
- 0,
- 0,
- ],
- dtype=np.longlong,
- )
-
- self.data_writers[i].write(header.tobytes())
+ def _close_writers(self):
+ for i, writer in enumerate(self.data_writers):
+ if self.cats:
+ # Write HugeCTR Metadata
+ writer.seek(0)
+ # error_check (0: no error check; 1: check_num)
+ # num of samples in this file
+ # Dimension of the labels
+ # Dimension of the features
+ # slot_num for each embedding
+ # reserved for future use
+ header = np.array(
+ [
+ 0,
+ self.num_samples[i],
+ len(self.labels),
+ len(self.conts),
+ len(self.cats),
+ 0,
+ 0,
+ 0,
+ ],
+ dtype=np.longlong,
+ )
+ writer.write(header.tobytes())
+ writer.close()
+ return None
def device_mem_size(kind="total"):
@@ -556,74 +699,6 @@ def device_mem_size(kind="total"):
return size
-class WriterCache:
- def __init__(self):
- self.pq_writer_cache = {}
-
- def __del__(self):
- for path, (pw, fpath) in self.pq_writer_cache.items():
- pw.close()
-
- def get_pq_writer(self, prefix, s, mem):
- pw, fil = self.pq_writer_cache.get(prefix, (None, None))
- if pw is None:
- if mem:
- fil = BytesIO()
- pw = pwriter(fil, compression=None)
- self.pq_writer_cache[prefix] = (pw, fil)
- else:
- outfile_id = guid() + ".parquet"
- full_path = ".".join([prefix, outfile_id])
- pw = pwriter(full_path, compression=None)
- self.pq_writer_cache[prefix] = (pw, full_path)
- return pw
-
-
[email protected]
-def get_cache():
- with _DEFAULT_CACHE_LOCK:
- yield _get_cache()
-
-
-def _get_cache():
- try:
- worker = get_worker()
- except ValueError:
- # There is no dask.distributed worker.
- # Assume client/worker are same process
- global _DEFAULT_CACHE
- if _DEFAULT_CACHE is None:
- _DEFAULT_CACHE = WriterCache()
- return _DEFAULT_CACHE
- if not hasattr(worker, "pw_cache"):
- worker.pw_cache = WriterCache()
- return worker.pw_cache
-
-
-def clean_pw_cache():
- with _DEFAULT_CACHE_LOCK:
- try:
- worker = get_worker()
- except ValueError:
- global _DEFAULT_CACHE
- if _DEFAULT_CACHE is not None:
- del _DEFAULT_CACHE
- _DEFAULT_CACHE = None
- return
- if hasattr(worker, "pw_cache"):
- del worker.pw_cache
- return
-
-
-def close_cached_pw(fs):
- md_dict = {}
- with get_cache() as cache:
- for path, (pw, bio) in cache.pq_writer_cache.items():
- fn = bio.split(fs.sep)[-1]
- md_dict[fn] = pw.close(metadata_file_path=fn)
- return md_dict
-
-
def guid():
""" Simple utility function to get random hex string
"""
@@ -649,7 +724,7 @@ def _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs
return gdf_size
-def _to_parquet_dataset(ddf, fs, output_path, shuffle, out_files_per_proc):
+def _ddf_to_pq_dataset(ddf, fs, output_path, shuffle, out_files_per_proc):
name = "write-processed"
write_name = name + tokenize(ddf, shuffle, out_files_per_proc)
task_list = []
@@ -670,6 +745,42 @@ def _to_parquet_dataset(ddf, fs, output_path, shuffle, out_files_per_proc):
return Delayed(name, graph)
+def _finish_pq_dataset(client, ddf, shuffle, output_path, fs):
+ # Deal with "full" (per-worker) shuffle here.
+ if shuffle == "full":
+ if client:
+ client.cancel(ddf)
+ ddf = None
+ worker_md = client.run(_worker_shuffle, output_path, fs)
+ worker_md = list(collections.ChainMap(*worker_md.values()).items())
+ else:
+ ddf = None
+ worker_md = _worker_shuffle(output_path, fs)
+ worker_md = list(worker_md.items())
+ else:
+ # Collect parquet metadata while closing
+ # ParquetWriter object(s)
+ if client:
+ worker_md = client.run(close_cached_pw, fs)
+ worker_md = list(collections.ChainMap(*worker_md.values()).items())
+ else:
+ worker_md = close_cached_pw(fs)
+ worker_md = list(worker_md.items())
+
+ # Sort metadata by file name and convert list of
+ # tuples to a list of metadata byte-blobs
+ md_list = [m[1] for m in sorted(worker_md, key=lambda x: natural_sort_key(x[0]))]
+
+ # Aggregate metadata and write _metadata file
+ _write_pq_metadata_file(md_list, fs, output_path)
+
+ # Close ParquetWriter Objects
+ if client:
+ client.run(clean_pw_cache)
+ else:
+ clean_pw_cache()
+
+
def _write_pq_metadata_file(md_list, fs, path):
if md_list:
metadata_path = fs.sep.join([path, "_metadata"])
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -15,18 +15,14 @@
#
import collections
import logging
-import os
import time
import warnings
import dask
import yaml
-from cudf._lib.nvtx import annotate
-from dask.utils import natural_sort_key
from fsspec.core import get_fs_token_paths
import nvtabular.io as nvt_io
-from nvtabular.ds_writer import DatasetWriter
from nvtabular.ops import DFOperator, StatOperator, TransformOperator
from nvtabular.worker import clean_worker_cache
@@ -232,22 +228,6 @@ def finalize(self):
"""
self.load_config(self.config)
- def write_to_dataset(self, path, dataset, apply_ops=False, nfiles=1, shuffle=True, **kwargs):
- """ Write data to shuffled parquet dataset.
- """
- if isinstance(dataset, nvt_io.Dataset):
- itr = dataset.to_iter()
- else:
- itr = dataset
-
- writer = DatasetWriter(path, nfiles=nfiles)
-
- for gdf in itr:
- if apply_ops:
- gdf = self.apply_ops(gdf)
- writer.write(gdf, shuffle=shuffle)
- writer.write_metadata()
-
def load_config(self, config, pro=False):
"""
This function extracts all the operators from the given phases and produces a
@@ -492,14 +472,7 @@ def _run_trans_ops_for_phase(self, gdf, tasks):
return gdf
def apply_ops(
- self,
- gdf,
- start_phase=None,
- end_phase=None,
- shuffler=None,
- output_path=None,
- num_out_files=None,
- huge_ctr=None,
+ self, gdf, start_phase=None, end_phase=None, writer=None, output_path=None, shuffle=None
):
"""
gdf: cudf dataframe
@@ -515,33 +488,22 @@ def apply_ops(
start = time.time()
gdf = self._run_trans_ops_for_phase(gdf, self.phases[phase_index])
self.timings["preproc_apply"] += time.time() - start
- if phase_index == len(self.phases) - 1 and output_path:
- self.write_df(gdf, output_path, shuffler=shuffler, num_out_files=num_out_files)
+ if phase_index == len(self.phases) - 1 and writer and output_path:
- if huge_ctr and phase_index == len(self.phases) - 1:
- if not self.cal_col_names:
+ if writer.need_cal_col_names:
cat_names = self.get_final_cols_names("categorical")
cont_names = self.get_final_cols_names("continuous")
label_names = self.get_final_cols_names("label")
- huge_ctr.set_col_names(labels=label_names, cats=cat_names, conts=cont_names)
- self.cal_col_names = True
- huge_ctr.add_data(gdf)
+ writer.set_col_names(labels=label_names, cats=cat_names, conts=cont_names)
+ writer.need_cal_col_names = False
- return gdf
+ start_write = time.time()
+ writer.add_data(gdf)
+ self.timings["shuffle_df"] += time.time() - start_write
- @annotate("Write_df", color="red", domain="nvt_python")
- def write_df(self, gdf, export_path, shuffler, num_out_files):
- if shuffler:
- start = time.time()
- shuffler.add_data(gdf)
- self.timings["shuffle_df"] += time.time() - start
- else:
- file_name = f"{self.current_file_num}.parquet"
- path = os.path.join(export_path, file_name)
- gdf.to_parquet(path, compression=None)
- self.current_file_num += 1
+ return gdf
- def _update_stats(self, stat_op):
+ def _update_statistics(self, stat_op):
self.stats.update(stat_op.stats_collected())
def save_stats(self, path):
@@ -667,16 +629,38 @@ def exec_phase(self, phase_index, record_stats=True):
for r in self.client.compute(stats):
computed_stats, op = r.result()
op.finalize(computed_stats)
- self._update_stats(op)
+ self._update_statistics(op)
op.clear()
else:
for r in dask.compute(stats, scheduler="synchronous")[0]:
computed_stats, op = r
op.finalize(computed_stats)
- self._update_stats(op)
+ self._update_statistics(op)
op.clear()
del stats
+ def reorder_tasks(self, end):
+ if end != 2:
+ # Opt only works for two phases (for now)
+ return
+ stat_tasks = []
+ trans_tasks = []
+ for idx, _ in enumerate(self.phases[:end]):
+ for task in self.phases[idx]:
+ deps = task[2]
+ if isinstance(task[0], StatOperator):
+ if deps == ["base"]:
+ stat_tasks.append(task)
+ else:
+                        # This statistic depends on a transform
+                        # (Opt won't work)
+ return
+ elif isinstance(task[0], TransformOperator):
+ trans_tasks.append(task)
+
+ self.phases[0] = stat_tasks
+ self.phases[1] = trans_tasks
+
def apply(
self,
dataset,
@@ -684,11 +668,8 @@ def apply(
record_stats=True,
shuffle=None,
output_path="./ds_export",
+ output_format="parquet",
out_files_per_proc=None,
- hugectr_gen_output=False,
- hugectr_output_path="./hugectr",
- hugectr_num_out_files=None,
- hugectr_output_format=None,
**kwargs,
):
"""
@@ -714,6 +695,9 @@ def apply(
disk. A "true" full shuffle is not yet implemented.
output_path : string
path to output data
+ output_format : {"parquet", "hugectr", None}
+ Output format for processed/shuffled dataset. If None,
+ no output dataset will be written.
out_files_per_proc : integer
number of files to create (per process) after
shuffling the data
@@ -734,83 +718,87 @@ def apply(
# If no tasks have been loaded then we need to load internal config
if not self.phases:
self.finalize()
+
+        # Gather statistics (if apply_offline), and/or transform
+ # and write out processed data
if apply_offline:
- if hugectr_gen_output:
- raise ValueError(
- "TODO: Support HugeCTR output for offline processing with Dask."
- " This is part of the larger task of aligning online/offline API."
- )
- self.update_stats(
+ self.build_and_process_graph(
dataset,
output_path=output_path,
record_stats=record_stats,
shuffle=shuffle,
+ output_format=output_format,
out_files_per_proc=out_files_per_proc,
)
else:
- shuffler = None
- huge_ctr = None
- if shuffle:
- if isinstance(shuffle, str):
- raise ValueError("TODO: Align shuffling/writing API for online/offline.")
- shuffler = nvt_io.Shuffler(output_path, num_out_files=out_files_per_proc)
- if hugectr_gen_output:
- self.cal_col_names = False
- if hugectr_output_format == "binary":
- huge_ctr = nvt_io.HugeCTRWriter(
- hugectr_output_path, num_out_files=hugectr_num_out_files
- )
- elif hugectr_output_format == "parquet":
- huge_ctr = nvt_io.ParquetWriter(
- hugectr_output_path, num_out_files=hugectr_num_out_files
- )
- # "Online" apply currently requires manual iteration
+ self.iterate_online(
+ dataset,
+ output_path=output_path,
+ shuffle=shuffle,
+ output_format=output_format,
+ out_files_per_proc=out_files_per_proc,
+ )
+
+ def iterate_online(
+ self,
+ dataset,
+ end_phase=None,
+ output_path=None,
+ shuffle=None,
+ output_format=None,
+ out_files_per_proc=None,
+ apply_ops=True,
+ ):
+ """ Iterate through dataset and (optionally) apply/shuffle/write.
+ """
+ # Check if we have a (supported) writer
+ output_path = output_path or "./"
+ output_path = str(output_path)
+ writer = nvt_io.writer_factory(output_format, output_path, out_files_per_proc, shuffle)
+
+ # Iterate through dataset, apply ops, and write out processed data
+ if apply_ops:
for gdf in dataset.to_iter():
- self.apply_ops(
- gdf,
- output_path=output_path,
- shuffler=shuffler,
- num_out_files=out_files_per_proc,
- huge_ctr=huge_ctr,
- )
- if shuffler:
- shuffler.close()
- if huge_ctr:
- huge_ctr.close()
+ self.apply_ops(gdf, output_path=output_path, writer=writer, shuffle=shuffle)
- def reorder_tasks(self, end):
- if end != 2:
- # Opt only works for two phases (for now)
- return
- stat_tasks = []
- trans_tasks = []
- for idx, _ in enumerate(self.phases[:end]):
- for task in self.phases[idx]:
- deps = task[2]
- if isinstance(task[0], StatOperator):
- if deps == ["base"]:
- stat_tasks.append(task)
- else:
- # This statistics depends on a transform
- # (Opt wont work)
- return
- elif isinstance(task[0], TransformOperator):
- trans_tasks.append(task)
+ # Close writer and write general/specialized metadata
+ if writer:
+ general_md, special_md = writer.close()
- self.phases[0] = stat_tasks
- self.phases[1] = trans_tasks
+ # Note that we "could" have the special and general metadata
+ # written during `writer.close()` (just above) for the single-GPU case.
+ # Instead, the metadata logic is separated from the `Writer` object to
+ # simplify multi-GPU integration. When using Dask, we cannot assume
+ # that the "shared" metadata files can/will be written by the same
+ # process that writes the data.
+ type(writer).write_special_metadata(special_md, writer.fs, output_path)
+ type(writer).write_general_metadata(general_md, writer.fs, output_path)
+
+ def update_stats(self, dataset, end_phase=None):
+        """ Collect statistics only.
+ """
+ self.build_and_process_graph(dataset, end_phase=end_phase, record_stats=True)
- def update_stats(
+ def build_and_process_graph(
self,
dataset,
end_phase=None,
output_path=None,
record_stats=True,
shuffle=None,
+ output_format=None,
out_files_per_proc=None,
+ apply_ops=True,
):
+ """ Build Dask-task graph for workflow.
+
+ Full graph is only executed if `output_format` is specified.
+ """
end = end_phase if end_phase else len(self.phases)
+ if output_format not in ("parquet", None):
+ raise ValueError("Output format not yet supported with Dask.")
+
# Reorder tasks for two-phase workflows
self.reorder_tasks(end)
@@ -821,19 +809,66 @@ def update_stats(
clean_worker_cache()
self.set_ddf(dataset)
- for idx, _ in enumerate(self.phases[:end]):
- self.exec_phase(idx, record_stats=record_stats)
- if output_path:
- self.to_dataset(output_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc)
+ if apply_ops:
+ for idx, _ in enumerate(self.phases[:end]):
+ self.exec_phase(idx, record_stats=record_stats)
+ if output_format:
+ output_path = output_path or "./"
+ output_path = str(output_path)
+ self.ddf_to_dataset(output_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc)
+
+ def write_to_dataset(
+ self,
+ path,
+ dataset,
+ apply_ops=False,
+ nfiles=1,
+ shuffle=True,
+ output_format="parquet",
+ iterate=True,
+ ):
+ """ Write data to shuffled parquet dataset.
- def to_dataset(self, output_path, shuffle=None, out_files_per_proc=None):
+ Assumes statistics are already gathered.
+ """
+ path = str(path)
+ if iterate:
+ self.iterate_online(
+ dataset,
+ output_path=path,
+ shuffle=shuffle,
+ output_format=output_format,
+ out_files_per_proc=nfiles,
+ apply_ops=apply_ops,
+ )
+ else:
+ self.build_and_process_graph(
+ dataset,
+ output_path=path,
+ record_stats=False,
+ shuffle=shuffle,
+ output_format=output_format,
+ out_files_per_proc=nfiles,
+ apply_ops=apply_ops,
+ )
+
+ def ddf_to_dataset(
+ self, output_path, shuffle=None, out_files_per_proc=None, output_format="parquet"
+ ):
+ """ Dask-based dataset output.
+
+ Currently supports parquet only.
+ TODO: Leverage `ThreadedWriter` implementations.
+ """
+ if output_format != "parquet":
+ raise ValueError("Only parquet output supported with Dask.")
ddf = self.get_ddf()
fs = get_fs_token_paths(output_path)[0]
fs.mkdirs(output_path, exist_ok=True)
if shuffle or out_files_per_proc:
# Construct graph for Dask-based dataset write
- out = nvt_io._to_parquet_dataset(ddf, fs, output_path, shuffle, out_files_per_proc)
+ out = nvt_io._ddf_to_pq_dataset(ddf, fs, output_path, shuffle, out_files_per_proc)
# Would be nice to clean the categorical
# cache before the write (TODO)
@@ -847,40 +882,8 @@ def to_dataset(self, output_path, shuffle=None, out_files_per_proc=None):
self.ddf_base_dataset = None
out = dask.compute(out, scheduler="synchronous")[0]
- # Deal with "full" (per-worker) shuffle here.
- if shuffle == "full":
- if self.client:
- self.client.cancel(self.ddf)
- self.ddf = None
- worker_md = self.client.run(nvt_io._worker_shuffle, output_path, fs)
- worker_md = list(collections.ChainMap(*worker_md.values()).items())
- else:
- self.ddf = None
- worker_md = nvt_io._worker_shuffle(output_path, fs)
- worker_md = list(worker_md.items())
-
- else:
- # Collect parquet metadata while closing
- # ParquetWriter object(s)
- if self.client:
- worker_md = self.client.run(nvt_io.close_cached_pw, fs)
- worker_md = list(collections.ChainMap(*worker_md.values()).items())
- else:
- worker_md = nvt_io.close_cached_pw(fs)
- worker_md = list(worker_md.items())
-
- # Sort metadata by file name and convert list of
- # tuples to a list of metadata byte-blobs
- md_list = [m[1] for m in sorted(worker_md, key=lambda x: natural_sort_key(x[0]))]
-
- # Aggregate metadata and write _metadata file
- nvt_io._write_pq_metadata_file(md_list, fs, output_path)
-
- # Close ParquetWriter Objects
- if self.client:
- self.client.run(nvt_io.clean_pw_cache)
- else:
- nvt_io.clean_pw_cache()
+ # Follow-up Shuffling and _metadata creation
+ nvt_io._finish_pq_dataset(self.client, self.ddf, shuffle, output_path, fs)
return
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -25,7 +25,7 @@
import nvtabular as nvt
import nvtabular.io
import nvtabular.ops as ops
-from nvtabular.io import Shuffler
+from nvtabular.io import ParquetWriter
from tests.conftest import allcols_csv, mycols_csv, mycols_pq
@@ -37,9 +37,9 @@ def test_shuffle_gpu(tmpdir, datasets, engine):
df1 = cudf.read_parquet(paths[0])[mycols_pq]
else:
df1 = cudf.read_csv(paths[0], header=False, names=allcols_csv)[mycols_csv]
- shuf = Shuffler(tmpdir, num_files)
+ shuf = ParquetWriter(tmpdir, num_out_files=num_files, shuffle=True)
shuf.add_data(df1)
- writer_files = shuf.writer.data_files
+ writer_files = shuf.data_paths
shuf.close()
if engine == "parquet":
df3 = cudf.read_parquet(writer_files[0])[mycols_pq]
@@ -88,7 +88,7 @@ def test_dask_dataset(datasets, engine, num_files):
assert_eq(ddf0, result)
[email protected]("output_format", ["binary", "parquet"])
[email protected]("output_format", ["hugectr", "parquet"])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("op_columns", [["x"], None])
def test_hugectr(tmpdir, df, dataset, output_format, engine, op_columns):
@@ -99,10 +99,8 @@ def test_hugectr(tmpdir, df, dataset, output_format, engine, op_columns):
# set variables
nfiles = 10
ext = ""
- outdir = tmpdir + "/dontcare"
- h_outdir = tmpdir + "/hugectr"
+ outdir = tmpdir + "/hugectr"
os.mkdir(outdir)
- os.mkdir(h_outdir)
# process data
processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_names)
@@ -120,22 +118,19 @@ def test_hugectr(tmpdir, df, dataset, output_format, engine, op_columns):
apply_offline=False,
record_stats=False,
output_path=outdir,
+ out_files_per_proc=nfiles,
+ output_format=output_format,
shuffle=False,
- hugectr_gen_output=True,
- hugectr_output_path=h_outdir,
- hugectr_num_out_files=nfiles,
- hugectr_output_format=output_format,
)
# Check files
ext = ""
if output_format == "parquet":
ext = "parquet"
- assert os.path.isfile(h_outdir + "/metadata.json")
- elif output_format == "binary":
+ assert os.path.isfile(outdir + "/metadata.json")
+ elif output_format == "hugectr":
ext = "data"
- assert os.path.isfile(h_outdir + "/file_list.txt")
-
+ assert os.path.isfile(outdir + "/file_list.txt")
for n in range(nfiles):
- assert os.path.isfile(os.path.join(h_outdir, str(n) + "." + ext))
+ assert os.path.isfile(os.path.join(outdir, str(n) + "." + ext))
diff --git a/tests/unit/test_notebooks.py b/tests/unit/test_notebooks.py
--- a/tests/unit/test_notebooks.py
+++ b/tests/unit/test_notebooks.py
@@ -35,6 +35,7 @@ def test_optimize_criteo(tmpdir):
def test_rossman_example(tmpdir):
+ pytest.importorskip("tensorflow")
_get_random_rossmann_data(1000).to_csv(os.path.join(tmpdir, "train.csv"))
_get_random_rossmann_data(1000).to_csv(os.path.join(tmpdir, "valid.csv"))
diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -400,7 +400,7 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = nvtabular.io.Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
+ glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
assert is_integer_dtype(df_pp["name-cat"].dtype)
@@ -420,7 +420,7 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = nvtabular.io.Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
+ glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
assert is_integer_dtype(df_pp["name-cat"].dtype)
@@ -446,7 +446,7 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
processor.update_stats(dataset)
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = nvtabular.io.Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
+ glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -470,7 +470,7 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = nvtabular.io.Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
+ glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
assert is_integer_dtype(df_pp["name-cat_add100"].dtype)
@@ -497,7 +497,7 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
dataset_2 = nvtabular.io.Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
+ glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac
)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
assert np.sum(df_pp["x_mul0_add100"] < 100) == 0
diff --git a/tests/unit/test_tf_dataloader.py b/tests/unit/test_tf_dataloader.py
--- a/tests/unit/test_tf_dataloader.py
+++ b/tests/unit/test_tf_dataloader.py
@@ -54,7 +54,7 @@ def test_tf_gpu_dl(tmpdir, paths, use_paths, dataset, batch_size, gpu_memory_fra
engine=engine,
shuffle=False,
)
- processor.update_stats(dataset, record_stats=True)
+ processor.update_stats(dataset)
data_itr.map(processor)
rows = 0
diff --git a/tests/unit/test_torch_dataloader.py b/tests/unit/test_torch_dataloader.py
--- a/tests/unit/test_torch_dataloader.py
+++ b/tests/unit/test_torch_dataloader.py
@@ -148,7 +148,7 @@ def get_norms(tar: cudf.Series):
torch_dataloader.FileItrDataset(
x, use_row_groups=True, gpu_memory_frac=gpu_memory_frac, names=allcols_csv
)
- for x in glob.glob(str(tmpdir) + "/ds_part.*.parquet")
+ for x in glob.glob(str(tmpdir) + "/*.parquet")
]
data_itr = torch.utils.data.ChainDataset(data_files)
@@ -160,9 +160,7 @@ def get_norms(tar: cudf.Series):
for chunk in dl:
len_df_pp += len(chunk[0][0])
- dataset = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac,
- )
+ dataset = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
x = processor.ds_to_tensors(dataset.to_iter(), apply_ops=False)
num_rows, num_row_groups, col_names = cudf.io.read_parquet_metadata(str(tmpdir) + "/_metadata")
diff --git a/tests/unit/test_workflow.py b/tests/unit/test_workflow.py
--- a/tests/unit/test_workflow.py
+++ b/tests/unit/test_workflow.py
@@ -91,9 +91,7 @@ def get_norms(tar: cudf.Series):
# Write to new "shuffled" and "processed" dataset
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
- dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
- )
+ dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -167,9 +165,7 @@ def get_norms(tar: cudf.Series):
# Write to new "shuffled" and "processed" dataset
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
- dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
- )
+ dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -250,9 +246,7 @@ def get_norms(tar: cudf.Series):
# Write to new "shuffled" and "processed" dataset
processor.write_to_dataset(tmpdir, dataset, nfiles=10, shuffle=True, apply_ops=True)
- dataset_2 = Dataset(
- glob.glob(str(tmpdir) + "/ds_part.*.parquet"), part_mem_fraction=gpu_memory_frac
- )
+ dataset_2 = Dataset(glob.glob(str(tmpdir) + "/*.parquet"), part_mem_fraction=gpu_memory_frac)
df_pp = cudf.concat(list(dataset_2.to_iter()), axis=0)
@@ -284,7 +278,7 @@ def test_parquet_output(client, use_client, tmpdir, shuffle):
)
processor.add_preprocess(ops.Normalize())
processor.finalize()
- processor.update_stats(
+ processor.apply(
dataset, output_path=out_path, shuffle=shuffle, out_files_per_proc=out_files_per_proc
)
| [Task] Write global parquet metadata file
**Issue by [benfred](https://github.com/benfred)**
_Friday May 22, 2020 at 19:52 GMT_
_Originally opened as https://github.com/rapidsai/recsys/issues/168_
----
We should write out a global metadata file when exporting a parquet dataset.
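
For reference, a minimal sketch of the pyarrow pattern this builds on: collect each file's footer, point its row groups at the data file, merge the footers, and write them out as `_metadata`. The file names and toy table below are placeholders, not NVTabular code.

```python
import pyarrow as pa
import pyarrow.parquet as pq

collected = []  # ParquetWriter appends one FileMetaData here on close()
for i in range(2):
    table = pa.table({"x": [i, i + 1]})           # toy data
    fn = f"part.{i}.parquet"                      # hypothetical file name
    writer = pq.ParquetWriter(fn, table.schema, metadata_collector=collected)
    writer.write_table(table)
    writer.close()
    collected[-1].set_file_path(fn)               # row groups must reference their data file

# Merge all per-file footers and write the dataset-level _metadata file
merged = collected[0]
for md in collected[1:]:
    merged.append_row_groups(md)
with open("_metadata", "wb") as fil:
    merged.write_metadata_file(fil)
```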
| **Comment by [benfred](https://github.com/benfred)**
_Wednesday May 27, 2020 at 00:04 GMT_
----
See also https://github.com/rapidsai/cudf/issues/5284
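
Based on the test changes in this PR, a rough usage sketch of the resulting behavior; `processor` and `dataset` are assumed to be an already-configured `nvt.Workflow` and `nvtabular.io.Dataset`, and the shuffle value is illustrative.

```python
# Hypothetical setup: `processor` is an nvt.Workflow, `dataset` an nvtabular.io.Dataset
processor.apply(
    dataset,
    output_path="./processed",
    shuffle=True,              # illustrative; "full" triggers the per-worker shuffle path
    out_files_per_proc=2,
)

# The export should now include a global footer next to the data files
import cudf
num_rows, num_row_groups, col_names = cudf.io.read_parquet_metadata("./processed/_metadata")
```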
| 2020-07-17T16:38:24 |
NVIDIA-Merlin/NVTabular | 187 | NVIDIA-Merlin__NVTabular-187 | [
"121"
] | e41d77a1ecdf78b69c31a051895d5d224a01ccda | diff --git a/nvtabular/categorify.py b/nvtabular/categorify.py
--- a/nvtabular/categorify.py
+++ b/nvtabular/categorify.py
@@ -29,34 +29,51 @@
from nvtabular.worker import fetch_table_data, get_worker_cache
-def _make_name(*args):
- return "_".join(args)
+def _make_name(*args, sep="_"):
+ return sep.join(args)
@annotate("top_level_groupby", color="green", domain="nvt_python")
-def _top_level_groupby(gdf, cat_cols, tree_width, cont_cols, sum_sq, on_host):
+def _top_level_groupby(
+ gdf, cat_col_groups, tree_width, cont_cols, sum_sq, on_host, concat_groups, name_sep
+):
# Top-level operation for category-based groupby aggregations
output = {}
k = 0
- for i, cat_col in enumerate(cat_cols):
+ for i, cat_col_group in enumerate(cat_col_groups):
+
+ if isinstance(cat_col_group, str):
+ cat_col_group = [cat_col_group]
+ cat_col_group_str = _make_name(*cat_col_group, sep=name_sep)
+
+ if concat_groups and len(cat_col_group) > 1:
+ # Concatenate columns and replace cat_col_group
+ # with the single name
+ df_gb = cudf.DataFrame()
+ ignore_index = True
+ df_gb[cat_col_group_str] = _concat([gdf[col] for col in cat_col_group], ignore_index)
+ cat_col_group = [cat_col_group_str]
+ else:
+ # Compile aggregation dictionary and add "squared-sum"
+ # column(s) (necessary when `cont_cols` is non-empty)
+ df_gb = gdf[cat_col_group + cont_cols].copy(deep=False)
- # Compile aggregation dictionary and add "squared-sum"
- # column(s) (necessary when `cont_cols` is non-empty)
- df_gb = gdf[[cat_col] + cont_cols].copy(deep=False)
agg_dict = {}
- agg_dict[cat_col] = ["count"]
+ agg_dict[cat_col_group[0]] = ["count"]
for col in cont_cols:
agg_dict[col] = ["sum"]
if sum_sq:
- name = _make_name(col, "pow2")
+ name = _make_name(col, "pow2", sep=name_sep)
df_gb[name] = df_gb[col].pow(2)
agg_dict[name] = ["sum"]
# Perform groupby and flatten column index
# (flattening provides better cudf support)
- gb = df_gb.groupby(cat_col, dropna=False).agg(agg_dict)
+ gb = df_gb.groupby(cat_col_group, dropna=False).agg(agg_dict)
gb.columns = [
- _make_name(*name) if name[0] == cat_col else _make_name(*((cat_col,) + name))
+ _make_name(*(tuple(cat_col_group) + name[1:]), sep=name_sep)
+ if name[0] == cat_col_group[0]
+ else _make_name(*(tuple(cat_col_group) + name), sep=name_sep)
for name in gb.columns.to_flat_index()
]
gb.reset_index(inplace=True, drop=False)
@@ -64,7 +81,7 @@ def _top_level_groupby(gdf, cat_cols, tree_width, cont_cols, sum_sq, on_host):
# Split the result by the hash value of the categorical column
for j, split in enumerate(
- gb.partition_by_hash([cat_col], tree_width[cat_col], keep_index=False)
+ gb.partition_by_hash(cat_col_group, tree_width[cat_col_group_str], keep_index=False)
):
if on_host:
output[k] = split.to_pandas()
@@ -76,37 +93,46 @@ def _top_level_groupby(gdf, cat_cols, tree_width, cont_cols, sum_sq, on_host):
@annotate("mid_level_groupby", color="green", domain="nvt_python")
-def _mid_level_groupby(dfs, col, cont_cols, agg_list, freq_limit, on_host):
+def _mid_level_groupby(
+ dfs, col_group, cont_cols, agg_list, freq_limit, on_host, concat_groups, name_sep
+):
+
+ if isinstance(col_group, str):
+ col_group = [col_group]
+
+ if concat_groups and len(col_group) > 1:
+ col_group = [_make_name(*col_group, sep=name_sep)]
+
ignore_index = True
if on_host:
- gb = cudf.from_pandas(_concat(dfs, ignore_index)).groupby(col, dropna=False).sum()
+ gb = cudf.from_pandas(_concat(dfs, ignore_index)).groupby(col_group, dropna=False).sum()
else:
- gb = _concat(dfs, ignore_index).groupby(col, dropna=False).sum()
+ gb = _concat(dfs, ignore_index).groupby(col_group, dropna=False).sum()
gb.reset_index(drop=False, inplace=True)
- name_count = _make_name(col, "count")
+ name_count = _make_name(*(col_group + ["count"]), sep=name_sep)
if freq_limit:
gb = gb[gb[name_count] >= freq_limit]
- required = [col]
+ required = col_group.copy()
if "count" in agg_list:
required.append(name_count)
ddof = 1
for cont_col in cont_cols:
- name_sum = _make_name(col, cont_col, "sum")
+ name_sum = _make_name(*(col_group + [cont_col, "sum"]), sep=name_sep)
if "sum" in agg_list:
required.append(name_sum)
if "mean" in agg_list:
- name_mean = _make_name(col, cont_col, "mean")
+ name_mean = _make_name(*(col_group + [cont_col, "mean"]), sep=name_sep)
required.append(name_mean)
gb[name_mean] = gb[name_sum] / gb[name_count]
if "var" in agg_list or "std" in agg_list:
n = gb[name_count]
x = gb[name_sum]
- x2 = gb[_make_name(col, cont_col, "pow2", "sum")]
+ x2 = gb[_make_name(*(col_group + [cont_col, "pow2", "sum"]), sep=name_sep)]
result = x2 - x ** 2 / n
div = n - ddof
div[div < 1] = 1
@@ -114,11 +140,11 @@ def _mid_level_groupby(dfs, col, cont_cols, agg_list, freq_limit, on_host):
result[(n - ddof) == 0] = np.nan
if "var" in agg_list:
- name_var = _make_name(col, cont_col, "var")
+ name_var = _make_name(*(col_group + [cont_col, "var"]), sep=name_sep)
required.append(name_var)
gb[name_var] = result
if "std" in agg_list:
- name_std = _make_name(col, cont_col, "std")
+ name_std = _make_name(*(col_group + [cont_col, "std"]), sep=name_sep)
required.append(name_std)
gb[name_std] = np.sqrt(result)
@@ -130,43 +156,61 @@ def _mid_level_groupby(dfs, col, cont_cols, agg_list, freq_limit, on_host):
@annotate("write_gb_stats", color="green", domain="nvt_python")
-def _write_gb_stats(dfs, base_path, col, on_host):
+def _write_gb_stats(dfs, base_path, col_group, on_host, concat_groups, name_sep):
+ if concat_groups and len(col_group) > 1:
+ col_group = [_make_name(*col_group, sep=name_sep)]
ignore_index = True
df = _concat(dfs, ignore_index)
if on_host:
df = cudf.from_pandas(df)
- rel_path = "cat_stats.%s.parquet" % (col)
+ if isinstance(col_group, str):
+ col_group = [col_group]
+ rel_path = "cat_stats.%s.parquet" % (_make_name(*col_group, sep=name_sep))
path = os.path.join(base_path, rel_path)
if len(df):
- df = df.sort_values(col, na_position="first")
+ df = df.sort_values(col_group, na_position="first")
df.to_parquet(path, write_index=False, compression=None)
else:
- df_null = cudf.DataFrame({col: [None]})
- df_null[col] = df_null[col].astype(df[col].dtype)
+ df_null = cudf.DataFrame({c: [None] for c in col_group})
+ for c in col_group:
+ df_null[c] = df_null[c].astype(df[c].dtype)
df_null.to_parquet(path, write_index=False, compression=None)
del df
return path
@annotate("write_uniques", color="green", domain="nvt_python")
-def _write_uniques(dfs, base_path, col, on_host):
+def _write_uniques(dfs, base_path, col_group, on_host, concat_groups, name_sep):
+ if concat_groups and len(col_group) > 1:
+ col_group = [_make_name(*col_group, sep=name_sep)]
ignore_index = True
+ if isinstance(col_group, str):
+ col_group = [col_group]
df = _concat(dfs, ignore_index)
if on_host:
df = cudf.from_pandas(df)
- rel_path = "unique.%s.parquet" % (col)
+ rel_path = "unique.%s.parquet" % (_make_name(*col_group, sep=name_sep))
path = "/".join([base_path, rel_path])
if len(df):
# Make sure first category is Null
- df = df.sort_values(col, na_position="first")
- if not df[col]._column.has_nulls:
- df = cudf.DataFrame(
- {col: _concat([cudf.Series([None], dtype=df[col].dtype), df[col]], ignore_index)}
- )
+ df = df.sort_values(col_group, na_position="first")
+ new_cols = {}
+ nulls_missing = False
+ for col in col_group:
+ if not df[col]._column.has_nulls:
+ nulls_missing = True
+ new_cols[col] = _concat(
+ [cudf.Series([None], dtype=df[col].dtype), df[col]], ignore_index
+ )
+ else:
+ new_cols[col] = df[col].copy(deep=False)
+ if nulls_missing:
+ df = cudf.DataFrame(new_cols)
df.to_parquet(path, write_index=False, compression=None)
else:
- df_null = cudf.DataFrame({col: [None]})
- df_null[col] = df_null[col].astype(df[col].dtype)
+ df_null = cudf.DataFrame({c: [None] for c in col_group})
+ for c in col_group:
+ df_null[c] = df_null[c].astype(df[c].dtype)
df_null.to_parquet(path, write_index=False, compression=None)
del df
return path
@@ -179,7 +223,7 @@ def _finish_labels(paths, cols):
def _groupby_to_disk(
ddf,
write_func,
- cols,
+ col_groups,
agg_cols,
agg_list,
out_path,
@@ -187,19 +231,30 @@ def _groupby_to_disk(
tree_width,
on_host,
stat_name="categories",
+ concat_groups=False,
+ name_sep="_",
):
- if not cols:
+ if not col_groups:
return {}
+ if concat_groups:
+ if agg_list and agg_list != ["count"]:
+ raise ValueError("Cannot use concat_groups=True with aggregations other than count")
+ if agg_cols:
+ raise ValueError("Cannot aggregate continuous-column stats with concat_groups=True")
+
# Update tree_width
- if tree_width is None:
- tree_width = {c: 8 for c in cols}
- elif isinstance(tree_width, int):
- tree_width = {c: tree_width for c in cols}
- else:
- for col in cols:
- if col not in tree_width:
- tree_width[col] = 8
+ tw = {}
+ for col in col_groups:
+ col = [col] if isinstance(col, str) else col
+ col_str = _make_name(*col, sep=name_sep)
+ if tree_width is None:
+ tw[col_str] = 8
+ elif isinstance(tree_width, int):
+ tw[col_str] = tree_width
+ else:
+ tw[col_str] = tree_width.get(col_str, None) or 8
+ tree_width = tw
# Make dedicated output directory for the categories
fs = get_fs_token_paths(out_path)[0]
@@ -207,7 +262,7 @@ def _groupby_to_disk(
fs.mkdirs(out_path, exist_ok=True)
dsk = {}
- token = tokenize(ddf, cols, out_path, freq_limit, tree_width, on_host)
+ token = tokenize(ddf, col_groups, out_path, freq_limit, tree_width, on_host)
level_1_name = "level_1-" + token
split_name = "split-" + token
level_2_name = "level_2-" + token
@@ -217,20 +272,28 @@ def _groupby_to_disk(
dsk[(level_1_name, p)] = (
_top_level_groupby,
(ddf._name, p),
- cols,
+ col_groups,
tree_width,
agg_cols,
("std" in agg_list or "var" in agg_list),
on_host,
+ concat_groups,
+ name_sep,
)
k = 0
- for c, col in enumerate(cols):
- for s in range(tree_width[col]):
+ for c, col in enumerate(col_groups):
+ col = [col] if isinstance(col, str) else col
+ col_str = _make_name(*col, sep=name_sep)
+ for s in range(tree_width[col_str]):
dsk[(split_name, p, c, s)] = (getitem, (level_1_name, p), k)
k += 1
- for c, col in enumerate(cols):
- for s in range(tree_width[col]):
+ col_groups_str = []
+ for c, col in enumerate(col_groups):
+ col = [col] if isinstance(col, str) else col
+ col_str = _make_name(*col, sep=name_sep)
+ col_groups_str.append(col_str)
+ for s in range(tree_width[col_str]):
dsk[(level_2_name, c, s)] = (
_mid_level_groupby,
[(split_name, p, c, s) for p in range(ddf.npartitions)],
@@ -239,27 +302,41 @@ def _groupby_to_disk(
agg_list,
freq_limit,
on_host,
+ concat_groups,
+ name_sep,
)
dsk[(level_3_name, c)] = (
write_func,
- [(level_2_name, c, s) for s in range(tree_width[col])],
+ [(level_2_name, c, s) for s in range(tree_width[col_str])],
out_path,
col,
on_host,
+ concat_groups,
+ name_sep,
)
dsk[finalize_labels_name] = (
_finish_labels,
- [(level_3_name, c) for c, col in enumerate(cols)],
- cols,
+ [(level_3_name, c) for c, col in enumerate(col_groups)],
+ col_groups_str,
)
graph = HighLevelGraph.from_collections(finalize_labels_name, dsk, dependencies=[ddf])
return graph, finalize_labels_name
def _category_stats(
- ddf, cols, agg_cols, agg_list, out_path, freq_limit, tree_width, on_host, stat_name="categories"
+ ddf,
+ col_groups,
+ agg_cols,
+ agg_list,
+ out_path,
+ freq_limit,
+ tree_width,
+ on_host,
+ stat_name="categories",
+ concat_groups=False,
+ name_sep="_",
):
# Check if we only need categories
if agg_cols == [] and agg_list == []:
@@ -267,7 +344,7 @@ def _category_stats(
return _groupby_to_disk(
ddf,
_write_uniques,
- cols,
+ col_groups,
agg_cols,
agg_list,
out_path,
@@ -275,6 +352,8 @@ def _category_stats(
tree_width,
on_host,
stat_name=stat_name,
+ concat_groups=concat_groups,
+ name_sep=name_sep,
)
# Otherwise, getting category-statistics
@@ -285,7 +364,7 @@ def _category_stats(
return _groupby_to_disk(
ddf,
_write_gb_stats,
- cols,
+ col_groups,
agg_cols,
agg_list,
out_path,
@@ -293,40 +372,51 @@ def _category_stats(
tree_width,
on_host,
stat_name=stat_name,
+ concat_groups=concat_groups,
+ name_sep=name_sep,
)
-def _encode(name, path, gdf, cat_cache, na_sentinel=-1, freq_threshold=0):
+def _encode(name, storage_name, path, gdf, cat_cache, na_sentinel=-1, freq_threshold=0):
value = None
+ selection_l = name if isinstance(name, list) else [name]
+ selection_r = name if isinstance(name, list) else [storage_name]
if path:
if cat_cache is not None:
- cat_cache = cat_cache if isinstance(cat_cache, str) else cat_cache.get(name, "disk")
+ cat_cache = (
+ cat_cache if isinstance(cat_cache, str) else cat_cache.get(storage_name, "disk")
+ )
if len(gdf):
with get_worker_cache("cats") as cache:
value = fetch_table_data(
- cache, path, columns=[name], cache=cat_cache, cats_only=True
+ cache, path, columns=selection_r, cache=cat_cache, cats_only=True
)
else:
- value = cudf.io.read_parquet(path, index=False, columns=[name])
+ value = cudf.io.read_parquet(path, index=False, columns=selection_r)
value.index.name = "labels"
value.reset_index(drop=False, inplace=True)
- vals = gdf[name].copy(deep=False)
if value is None:
- value = cudf.DataFrame({name: [None]})
- value[name] = value[name].astype(vals.dtype)
+ value = cudf.DataFrame()
+ for c in selection_r:
+ typ = gdf[selection_l[0]].dtype if len(selection_l) == 1 else gdf[c].dtype
+ value[c] = cudf.Series([None], dtype=typ)
value.index.name = "labels"
value.reset_index(drop=False, inplace=True)
if freq_threshold > 0:
- codes = cudf.DataFrame({name: vals.copy(), "order": cp.arange(len(vals))})
- codes = codes.merge(value, on=name, how="left").sort_values("order")["labels"]
+ codes = cudf.DataFrame({"order": cp.arange(len(gdf))})
+ for c in selection_l:
+ codes[c] = gdf[c].copy()
+ codes = codes.merge(
+ value, left_on=selection_l, right_on=selection_r, how="left"
+ ).sort_values("order")["labels"]
codes.fillna(na_sentinel, inplace=True)
return codes.values
else:
# Use `searchsorted` if we are using a "full" encoding
- labels = value[name].searchsorted(vals, side="left", na_position="first")
- labels[labels >= len(value[name])] = na_sentinel
+ labels = value[selection_r].searchsorted(gdf[selection_l], side="left", na_position="first")
+ labels[labels >= len(value[selection_r])] = na_sentinel
return labels
@@ -337,3 +427,19 @@ def _read_groupby_stat_df(path, name, cat_cache):
if cache:
return fetch_table_data(cache, path, cache=cat_cache)
return cudf.io.read_parquet(path, index=False)
+
+
+def _get_multicolumn_names(column_groups, gdf_columns, name_sep):
+ cat_names = []
+ multi_col_group = {}
+ for col_group in column_groups:
+ if isinstance(col_group, list):
+ name = _make_name(*col_group, sep=name_sep)
+ if name not in cat_names:
+ cat_names.append(name)
+ # TODO: Perhaps we should check that all columns from the group
+ # are in gdf here?
+ multi_col_group[name] = col_group
+ elif col_group in gdf_columns:
+ cat_names.append(col_group)
+ return cat_names, multi_col_group
diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -329,12 +329,10 @@ def add_data(self, gdf):
gdf.scatter_by_map(ind, map_size=self.num_out_files, keep_index=False)
):
self.num_samples[x] += len(group)
- # It seems that the `copy()` operations here are necessary
- # (test_io.py::test_mulifile_parquet fails otherwise)...
if self.num_threads > 1:
- self.queue.put((x, group.copy()))
+ self.queue.put((x, group))
else:
- self._write_table(x, group.copy())
+ self._write_table(x, group)
# wait for all writes to finish before exiting
# (so that we aren't using memory)
diff --git a/nvtabular/ops.py b/nvtabular/ops.py
--- a/nvtabular/ops.py
+++ b/nvtabular/ops.py
@@ -17,6 +17,7 @@
import cupy
import numpy as np
from cudf._lib.nvtx import annotate
+from dask.core import flatten
from dask.delayed import Delayed
from nvtabular import categorify as nvt_cat
@@ -470,7 +471,7 @@ class Normalize(DFOperator):
@property
def req_stats(self):
- return [Moments()]
+ return [Moments(columns=self.columns)]
@annotate("Normalize_op", color="darkgreen", domain="nvt_python")
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
@@ -606,7 +607,7 @@ def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None
return new_gdf
-class CategoryStatistics(StatOperator):
+class GroupbyStatistics(StatOperator):
"""
Uses groupby aggregation to determine the unique groups of a categorical
feature and calculates the desired statistics of requested continuous
@@ -624,9 +625,16 @@ class CategoryStatistics(StatOperator):
that "count" corresponds to the group itself, while all
other statistics correspond to a specific continuous column.
Supported statistics include ["count", "sum", "mean", "std", "var"].
- columns : list of str, default None
- Categorical columns to collect statistics for. If None,
- the operation will target all known categorical columns.
+ columns : list of str or list(str), default None
+ Categorical columns (or "column groups") to collect statistics for.
+ If None, the operation will target all known categorical columns.
+ concat_groups : bool, default False
+ Applies only if there are list elements in the ``columns`` input. If True,
+ the values within these column groups will be concatenated, and the
+ new (temporary) columns will be used to perform the groupby. The purpose of
+ this option is to enable multiple columns to be label-encoded jointly.
+ (see Categorify). Note that this option is only allowed for the "count"
+ statistics (with cont_names == None).
tree_width : dict or int, optional
Tree width of the hash-based groupby reduction for each categorical
column. High-cardinality columns may require a large `tree_width`,
@@ -638,12 +646,15 @@ class CategoryStatistics(StatOperator):
parquet format.
freq_threshold : int, default 0
Categories with a `count` statistic less than this number will
- be omitted from the `CategoryStatistics` output.
+ be omitted from the `GroupbyStatistics` output.
on_host : bool, default True
Whether to convert cudf data to pandas between tasks in the hash-based
groupby reduction. The extra host <-> device data movement can reduce
performance. However, using `on_host=True` typically improves stability
(by avoiding device-level memory pressure).
+ name_sep : str, default "_"
+ String separator to use between concatenated column names
+ for multi-column groups.
"""
def __init__(
@@ -656,8 +667,18 @@ def __init__(
on_host=True,
freq_threshold=None,
stat_name=None,
+ concat_groups=False,
+ name_sep="_",
):
- super(CategoryStatistics, self).__init__(columns)
+ # Set column_groups if the user has passed in a list of columns
+ self.column_groups = None
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, list):
+ self.column_groups = columns
+ columns = list(set(flatten(columns, container=list)))
+
+ super(GroupbyStatistics, self).__init__(columns)
self.cont_names = cont_names or []
self.stats = stats or []
self.categories = {}
@@ -666,15 +687,16 @@ def __init__(
self.freq_threshold = freq_threshold or 0
self.out_path = out_path or "./"
self.stat_name = stat_name or "categories"
- self.op_name = "CategoryStatistics-" + self.stat_name
+ self.op_name = "GroupbyStatistics-" + self.stat_name
+ self.concat_groups = concat_groups
+ self.name_sep = name_sep
@property
def _id(self):
return str(self.op_name)
def stat_logic(self, ddf, columns_ctx, input_cols, target_cols):
- cols = self.get_columns(columns_ctx, input_cols, target_cols)
-
+ col_groups = self.column_groups or self.get_columns(columns_ctx, input_cols, target_cols)
supported_ops = ["count", "sum", "mean", "std", "var"]
for op in self.stats:
if op not in supported_ops:
@@ -684,7 +706,7 @@ def stat_logic(self, ddf, columns_ctx, input_cols, target_cols):
agg_list = self.stats
dsk, key = nvt_cat._category_stats(
ddf,
- cols,
+ col_groups,
agg_cols,
agg_list,
self.out_path,
@@ -692,6 +714,8 @@ def stat_logic(self, ddf, columns_ctx, input_cols, target_cols):
self.tree_width,
self.on_host,
stat_name=self.stat_name,
+ concat_groups=self.concat_groups,
+ name_sep=self.name_sep,
)
return Delayed(key, dsk)
@@ -711,11 +735,11 @@ def clear(self):
return
-class GroupBy(DFOperator):
+class JoinGroupby(DFOperator):
"""
One of the ways to create new features is to calculate
- the basic statistics of the data that is grouped by a categorical
- feature. This operator groups the data by the given categorical
+ the basic statistics of the data that is grouped by categorical
+ features. This operator groups the data by the given categorical
feature(s) and calculates the desired statistics of requested continuous
features (along with the count of rows in each group). The aggregated
statistics are merged with the data (by joining on the desired
@@ -735,19 +759,22 @@ class GroupBy(DFOperator):
that "count" corresponds to the group itself, while all
other statistics correspond to a specific continuous column.
Supported statistics include ["count", "sum", "mean", "std", "var"].
- columns : list of str, default None
- Categorical columns to target for this operation. If None,
- the operation will target all known categorical columns.
+ columns : list of str or list(str), default None
+ Categorical columns (or multi-column "groups") to target for this op.
+ If None, the operation will target all known categorical columns.
preprocessing : bool, default True
Sets if this is a pre-processing operation or not
replace : bool, default False
This parameter is ignored
tree_width : dict or int, optional
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
out_path : str, optional
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
on_host : bool, default True
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
+ name_sep : str, default "_"
+ String separator to use between concatenated column names
+ for multi-column groups.
"""
default_in = CAT
@@ -764,7 +791,22 @@ def __init__(
cat_cache="host",
out_path=None,
on_host=True,
+ name_sep="_",
):
+ self.column_groups = None
+ self.storage_name = {}
+ self.name_sep = name_sep
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, list):
+ self.column_groups = columns
+ columns = list(set(flatten(columns, container=list)))
+ for group in self.column_groups:
+ if isinstance(group, list) and len(group) > 1:
+ name = nvt_cat._make_name(*group, sep=self.name_sep)
+ for col in group:
+ self.storage_name[col] = name
+
super().__init__(columns=columns, preprocessing=preprocessing, replace=False)
self.cont_names = cont_names
self.stats = stats
@@ -777,26 +819,45 @@ def __init__(
@property
def req_stats(self):
return [
- CategoryStatistics(
- columns=self.columns,
+ GroupbyStatistics(
+ columns=self.column_groups or self.columns,
+ concat_groups=False,
cont_names=self.cont_names,
stats=self.stats,
tree_width=self.tree_width,
out_path=self.out_path,
on_host=self.on_host,
stat_name=self.stat_name,
+ name_sep=self.name_sep,
)
]
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
+
new_gdf = cudf.DataFrame()
tmp = "__tmp__" # Temporary column for sorting
gdf[tmp] = cupy.arange(len(gdf), dtype="int32")
- for col, path in stats_context[self.stat_name].items():
- stat_gdf = nvt_cat._read_groupby_stat_df(path, col, self.cat_cache)
- tran_gdf = gdf[[col, tmp]].merge(stat_gdf, on=col, how="left")
+ if self.column_groups:
+ cat_names, multi_col_group = nvt_cat._get_multicolumn_names(
+ self.column_groups, gdf.columns, self.name_sep
+ )
+ else:
+ multi_col_group = {}
+ cat_names = [name for name in target_columns if name in gdf.columns]
+
+ for name in cat_names:
+ storage_name = self.storage_name.get(name, name)
+ name = multi_col_group.get(name, name)
+ path = stats_context[self.stat_name][storage_name]
+ selection_l = name.copy() if isinstance(name, list) else [name]
+ selection_r = name if isinstance(name, list) else [storage_name]
+
+ stat_gdf = nvt_cat._read_groupby_stat_df(path, storage_name, self.cat_cache)
+ tran_gdf = gdf[selection_l + [tmp]].merge(
+ stat_gdf, left_on=selection_l, right_on=selection_r, how="left"
+ )
tran_gdf = tran_gdf.sort_values(tmp)
- tran_gdf.drop(columns=[col, tmp], inplace=True)
+ tran_gdf.drop(columns=selection_l + [tmp], inplace=True)
new_cols = [c for c in tran_gdf.columns if c not in new_gdf.columns]
new_gdf[new_cols] = tran_gdf[new_cols].reset_index(drop=True)
gdf.drop(columns=[tmp], inplace=True)
@@ -937,20 +998,29 @@ class Categorify(DFOperator):
Categories with a count/frequency below this threshold will be
omitted from the encoding and corresponding data will be mapped
to the "null" category.
- columns : list of str, default None
- Categorical columns to target for this operation. If None,
- the operation will target all known categorical columns.
+ columns : list of str or list(str), default None
+ Categorical columns (or multi-column "groups") to target for this op.
+ If None, the operation will target all known categorical columns.
+ If columns contains 1+ list(str) elements, the columns within each
+ list/group will be encoded according to the `encode_type` setting.
+ encode_type : {"joint", "combo"}, default "joint"
+ If "joint", the columns within any multi-column group will be
+ jointly encoded. If "combo", the combination of values will be
+ encoded as a new column. Note that replacement is not allowed for
+ "combo", because the same column name can be included in
+ multiple groups.
preprocessing : bool, default True
Sets if this is a pre-processing operation or not
replace : bool, default True
- Replaces the transformed column with the original input
- if set Yes
+ Replaces the transformed column with the original input.
+ Note that this does not apply to multi-column groups with
+ `encode_type="combo"`.
tree_width : dict or int, optional
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
out_path : str, optional
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
on_host : bool, default True
- Passed to `CategoryStatistics` dependency.
+ Passed to `GroupbyStatistics` dependency.
na_sentinel : default 0
Label to use for null-category mapping
cat_cache : {"device", "host", "disk"} or dict
@@ -961,6 +1031,9 @@ class Categorify(DFOperator):
dtype :
If specified, categorical labels will be cast to this dtype
after encoding is performed.
+ name_sep : str, default "_"
+ String separator to use between concatenated column names
+ for multi-column groups.
"""
default_in = CAT
@@ -978,7 +1051,71 @@ def __init__(
cat_cache="host",
dtype=None,
on_host=True,
+ encode_type="joint",
+ name_sep="_",
):
+
+ # We need to handle three types of encoding here:
+ #
+ # (1) Conventional encoding. There are no multi-column groups. So,
+ # each categorical column is separately transformed into a new
+ # "encoded" column (1-to-1). The unique values are calculated
+ # separately for each column.
+ #
+ # (2) Multi-column "Joint" encoding (there are multi-column groups
+ # in `columns` and `encode_type="joint"`). Still a
+ # 1-to-1 transofrmation of categorical columns. However,
+ # we concatenate column groups to determine uniques (rather
+ # than getting uniques of each categorical column separately).
+ #
+ # (3) Multi-column "Group" encoding (there are multi-column groups
+ # in `columns` and `encode_type="combo"`). No longer
+ # a 1-to-1 transformation of categorical columns. Each column
+ # group will be transformed to a single "encoded" column. This
+ # means the unique "values" correspond to unique combinations.
+ # Since the same column may be included in multiple groups,
+ # replacement is not allowed for this transform.
+
+ # Set column_groups if the user has passed in a list of columns.
+ # The purpose is to capture multi-column groups. If the user doesn't
+ # specify `columns`, there are no multi-column groups to worry about.
+ self.column_groups = None
+ self.name_sep = name_sep
+
+ # For case (2), we need to keep track of the multi-column group name
+ # that will be used for the joint encoding of each column in that group.
+ # For case (3), we also use this "storage name" to signify the name of
+ # the file with the required "combination" groupby statistics.
+ self.storage_name = {}
+
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, list):
+ # User passed in a list of column groups. We need to figure out
+ # if this list contains any multi-column groups, and if there
+ # are any (obvious) problems with these groups
+ self.column_groups = columns
+ columns = list(set(flatten(columns, container=list)))
+ columns_all = list(flatten(columns, container=list))
+ if sorted(columns_all) != sorted(columns) and encode_type == "joint":
+ # If we are doing "joint" encoding, there must be unique mapping
+ # between input column names and column groups. Otherwise, more
+ # than one unique-value table could be used to encode the same
+ # column.
+ raise ValueError("Same column name included in multiple groups.")
+ for group in self.column_groups:
+ if isinstance(group, list) and len(group) > 1:
+ # For multi-column groups, we concatenate column names
+ # to get the "group" name.
+ name = nvt_cat._make_name(*group, sep=self.name_sep)
+ for col in group:
+ self.storage_name[col] = name
+
+ # Only support two kinds of multi-column encoding
+ if encode_type not in ("joint", "combo"):
+ raise ValueError(f"encode_type={encode_type} not supported.")
+
+ # Other self-explanatory initialization
super().__init__(columns=columns, preprocessing=preprocessing, replace=replace)
self.freq_threshold = freq_threshold
self.out_path = out_path or "./"
@@ -988,12 +1125,14 @@ def __init__(
self.on_host = on_host
self.cat_cache = cat_cache
self.stat_name = "categories"
+ self.encode_type = encode_type
@property
def req_stats(self):
return [
- CategoryStatistics(
- columns=self.columns,
+ GroupbyStatistics(
+ columns=self.column_groups or self.columns,
+ concat_groups=self.encode_type == "joint",
cont_names=[],
stats=[],
freq_threshold=self.freq_threshold,
@@ -1001,23 +1140,57 @@ def req_stats(self):
out_path=self.out_path,
on_host=self.on_host,
stat_name=self.stat_name,
+ name_sep=self.name_sep,
)
]
@annotate("Categorify_op", color="darkgreen", domain="nvt_python")
- def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context={}):
- cat_names = target_columns
- new_gdf = cudf.DataFrame()
- if not cat_names:
- return gdf
- cat_names = [name for name in cat_names if name in gdf.columns]
- new_cols = []
+ def apply_op(
+ self,
+ gdf: cudf.DataFrame,
+ columns_ctx: dict,
+ input_cols,
+ target_cols=["base"],
+ stats_context={},
+ ):
+ new_gdf = gdf.copy(deep=False)
+ target_columns = self.get_columns(columns_ctx, input_cols, target_cols)
+ if not target_columns:
+ return new_gdf
+
+ if self.column_groups and not self.encode_type == "joint":
+ # Case (3) - We want to track multi- and single-column groups separately
+ # when we are NOT performing a joint encoding. This is because
+ # there is not a 1-to-1 mapping for columns in multi-col groups.
+ # We use `multi_col_group` to preserve the list format of
+ # multi-column groups only, and use `cat_names` to store the
+ # string representation of both single- and multi-column groups.
+ #
+ cat_names, multi_col_group = nvt_cat._get_multicolumn_names(
+ self.column_groups, gdf.columns, self.name_sep
+ )
+ else:
+ # Case (1) & (2) - Simple 1-to-1 mapping
+ multi_col_group = {}
+ cat_names = [name for name in target_columns if name in gdf.columns]
+
+ # Encode each column-group separately
for name in cat_names:
new_col = f"{name}_{self._id}"
- new_cols.append(new_col)
- path = stats_context[self.stat_name][name]
+
+ # Use the column-group `list` directly (not the string name)
+ use_name = multi_col_group.get(name, name)
+ # Storage name may be different than group for case (2)
+ # Only use the "aliased" `storage_name` if we are dealing with
+ # a multi-column group, or if we are doing joint encoding
+ if use_name != name or self.encode_type == "joint":
+ storage_name = self.storage_name.get(name, name)
+ else:
+ storage_name = name
+ path = stats_context[self.stat_name][storage_name]
new_gdf[new_col] = nvt_cat._encode(
- name,
+ use_name,
+ storage_name,
path,
gdf,
self.cat_cache,
@@ -1026,6 +1199,15 @@ def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context={}):
)
if self.dtype:
new_gdf[new_col] = new_gdf[new_col].astype(self.dtype, copy=False)
+
+ # Deal with replacement
+ if self.replace:
+ for name in cat_names:
+ new_col = f"{name}_{self._id}"
+ new_gdf[name] = new_gdf[new_col]
+ new_gdf.drop(columns=[new_col], inplace=True)
+
+ self.update_columns_ctx(columns_ctx, input_cols, new_gdf.columns, target_columns)
return new_gdf
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -147,7 +147,7 @@ def test_dask_groupby_stats(client, tmpdir, datasets, part_mem_fraction):
)
processor.add_preprocess(
- ops.GroupBy(cont_names=cont_names, stats=["count", "sum", "std"], out_path=str(tmpdir))
+ ops.JoinGroupby(cont_names=cont_names, stats=["count", "sum", "std"], out_path=str(tmpdir))
)
processor.finalize()
@@ -205,7 +205,7 @@ def test_cats_and_groupby_stats(client, tmpdir, datasets, part_mem_fraction, use
processor.add_preprocess(ops.Categorify(out_path=str(tmpdir), freq_threshold=10, on_host=True))
processor.add_cat_feature(
- ops.GroupBy(cont_names=cont_names, stats=["count", "sum"], out_path=str(tmpdir))
+ ops.JoinGroupby(cont_names=cont_names, stats=["count", "sum"], out_path=str(tmpdir))
)
processor.finalize()
diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -14,9 +14,11 @@
# limitations under the License.
#
import math
+import os
import cudf
import numpy as np
+import pandas as pd
import pytest
from cudf.tests.utils import assert_eq
from pandas.api.types import is_integer_dtype
@@ -99,7 +101,7 @@ def test_encoder(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
cont_names = ["x", "y", "id"]
label_name = ["label"]
- encoder = ops.CategoryStatistics(columns=op_columns)
+ encoder = ops.GroupbyStatistics(columns=op_columns)
config = nvt.workflow.get_new_config()
config["PP"]["categorical"] = [encoder]
@@ -118,6 +120,37 @@ def test_encoder(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
assert cats1.tolist() == [None] + cats_expected1.tolist()
[email protected]("engine", ["parquet"])
[email protected]("groups", [[["name-cat", "name-string"], "name-cat"], "name-string"])
[email protected]("concat_groups", [True, False])
+def test_multicolumn_cats(tmpdir, df, dataset, engine, groups, concat_groups):
+ cat_names = ["name-cat", "name-string"]
+ cont_names = ["x", "y", "id"]
+ label_name = ["label"]
+
+ encoder = ops.GroupbyStatistics(
+ columns=groups,
+ cont_names=None if concat_groups else ["x"],
+ stats=None if concat_groups else ["count", "mean"],
+ out_path=str(tmpdir),
+ concat_groups=concat_groups,
+ )
+ config = nvt.workflow.get_new_config()
+ config["PP"]["categorical"] = [encoder]
+
+ processor = nvt.Workflow(
+ cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config
+ )
+ processor.update_stats(dataset)
+
+ groups = [groups] if isinstance(groups, str) else groups
+ for group in groups:
+ group = [group] if isinstance(group, str) else group
+ prefix = "unique." if concat_groups else "cat_stats."
+ fn = prefix + "_".join(group) + ".parquet"
+ cudf.read_parquet(os.path.join(tmpdir, "categories", fn))
+
+
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("op_columns", [["x"], None])
@@ -230,7 +263,7 @@ def test_normalize(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
label_name = ["label"]
config = nvt.workflow.get_new_config()
- config["PP"]["continuous"] = [ops.Moments()]
+ config["PP"]["continuous"] = [ops.Moments(columns=op_columns)]
processor = nvtabular.Workflow(
cat_names=cat_names, cont_names=cont_names, label_name=label_name, config=config
@@ -242,7 +275,7 @@ def test_normalize(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
columns_ctx = {}
columns_ctx["continuous"] = {}
- columns_ctx["continuous"]["base"] = cont_names
+ columns_ctx["continuous"]["base"] = op_columns or cont_names
new_gdf = op.apply_op(df, columns_ctx, "continuous", stats_context=processor.stats)
df["x"] = (df["x"] - processor.stats["means"]["x"]) / processor.stats["stds"]["x"]
@@ -518,6 +551,110 @@ def test_lambdaop(tmpdir, df, dataset, gpu_memory_frac, engine, client):
assert np.sum(df_pp["x_mul0_add100"] < 100) == 0
[email protected]("groups", [[["Author", "Engaging User"]], None])
[email protected]("kind", ["joint", "combo"])
+def test_categorify_multi(tmpdir, groups, kind):
+
+ df = pd.DataFrame(
+ {
+ "Author": ["User_A", "User_E", "User_B", "User_C"],
+ "Engaging User": ["User_B", "User_B", "User_A", "User_D"],
+ "Post": [1, 2, 3, 4],
+ }
+ )
+
+ cat_names = ["Author", "Engaging User"]
+ cont_names = []
+ label_name = ["Post"]
+
+ processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)
+
+ processor.add_preprocess(ops.Categorify(columns=groups, out_path=str(tmpdir), encode_type=kind))
+ processor.finalize()
+ processor.apply(nvt.Dataset(df), output_format=None)
+ df_out = processor.get_ddf().compute(scheduler="synchronous")
+
+ if groups:
+ if kind == "joint":
+ # Columns are encoded jointly
+ assert df_out["Author"].to_arrow().to_pylist() == [1, 5, 2, 3]
+ assert df_out["Engaging User"].to_arrow().to_pylist() == [2, 2, 1, 4]
+ else:
+ # Column combinations are encoded
+ assert df_out["Author_Engaging User"].to_arrow().to_pylist() == [1, 4, 2, 3]
+ else:
+ # Columns are encoded independently
+ assert df_out["Author"].to_arrow().to_pylist() == [1, 4, 2, 3]
+ assert df_out["Engaging User"].to_arrow().to_pylist() == [2, 2, 1, 3]
+
+
+def test_categorify_multi_combo(tmpdir):
+
+ groups = [["Author", "Engaging User"], ["Author"], "Engaging User"]
+ kind = "combo"
+ df = pd.DataFrame(
+ {
+ "Author": ["User_A", "User_E", "User_B", "User_C"],
+ "Engaging User": ["User_B", "User_B", "User_A", "User_D"],
+ "Post": [1, 2, 3, 4],
+ }
+ )
+
+ cat_names = ["Author", "Engaging User"]
+ cont_names = []
+ label_name = ["Post"]
+
+ processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)
+
+ processor.add_preprocess(ops.Categorify(columns=groups, out_path=str(tmpdir), encode_type=kind))
+ processor.finalize()
+ processor.apply(nvt.Dataset(df), output_format=None)
+ df_out = processor.get_ddf().compute(scheduler="synchronous")
+
+ # Column combinations are encoded
+ assert df_out["Author"].to_arrow().to_pylist() == [1, 4, 2, 3]
+ assert df_out["Engaging User"].to_arrow().to_pylist() == [2, 2, 1, 3]
+ assert df_out["Author_Engaging User"].to_arrow().to_pylist() == [1, 4, 2, 3]
+
+
[email protected]("groups", [[["Author", "Engaging-User"]], "Author"])
+def test_joingroupby_multi(tmpdir, groups):
+
+ df = pd.DataFrame(
+ {
+ "Author": ["User_A", "User_A", "User_A", "User_B"],
+ "Engaging-User": ["User_B", "User_B", "User_C", "User_C"],
+ "Cost": [100.0, 200.0, 300.0, 400.0],
+ "Post": [1, 2, 3, 4],
+ }
+ )
+
+ cat_names = ["Author", "Engaging-User"]
+ cont_names = ["Cost"]
+ label_name = ["Post"]
+
+ processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)
+
+ processor.add_preprocess(
+ ops.JoinGroupby(columns=groups, out_path=str(tmpdir), stats=["sum"], cont_names=["Cost"])
+ )
+ processor.finalize()
+ processor.apply(nvt.Dataset(df), output_format=None)
+ df_out = processor.get_ddf().compute(scheduler="synchronous")
+
+ if isinstance(groups, list):
+ # Join on ["Author", "Engaging-User"]
+ assert df_out["Author_Engaging-User_Cost_sum"].to_arrow().to_pylist() == [
+ 300.0,
+ 300.0,
+ 300.0,
+ 400.0,
+ ]
+ else:
+ # Join on ["Author"]
+ assert df_out["Author_Cost_sum"].to_arrow().to_pylist() == [600.0, 600.0, 600.0, 400.0]
+
+
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("kind_ext", ["cudf", "pandas", "arrow", "parquet", "csv"])
@pytest.mark.parametrize("cache", ["host", "device"])
| [OP] Categorify multi-column support
**Is your feature request related to a problem? Please describe.**
Categorify is applied only to single columns - there are use cases where a user wants to apply Categorify to multiple columns.
For example, in social media, a user can write a post and another user can engage with that post.
The dataset has two columns:
- author: userid
- engaging user: userid
| Author | Engaging User | Post |
| ------------- | ------------- | ------------- |
| User_A | User_B | 1 |
| User_A | User_C | 2 |
| User_B | User_A | 3 |
| User_C | User_D | 4 |
Both columns have userids, but I do not want to categorify them independently.
The result should be
| Author | Engaging User | Post |
| ------------- | ------------- | ------------- |
| 0 | 1 | 1 |
| 0 | 2 | 2 |
| 1 | 0 | 3 |
| 2 | 3 | 4 |
**Describe the solution you'd like**
I want to categorify both columns together.
I concatenate Author and Engaging User, factorize them, and then apply the mapping back to both columns.
**Additional context**
Pseudocode:
```
import numpy as np
import pandas as pd
# factorize the concatenation of both columns so they share one vocabulary
uid = pd.factorize(np.concatenate((df["a_user_id"].values, df["b_user_id"].values)))[0]
df["a_user_id"] = uid[: df.shape[0]]
df["b_user_id"] = uid[df.shape[0] :]
```
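For reference, a minimal sketch of how this could be expressed with the multi-column support added in this PR, mirroring the new `test_categorify_multi` test; the toy DataFrame is taken from the table above, and `encode_type="joint"` encodes both columns against one shared vocabulary:
```
import pandas as pd
import nvtabular as nvt
from nvtabular import ops

df = pd.DataFrame({
    "Author": ["User_A", "User_A", "User_B", "User_C"],
    "Engaging User": ["User_B", "User_C", "User_A", "User_D"],
    "Post": [1, 2, 3, 4],
})

workflow = nvt.Workflow(cat_names=["Author", "Engaging User"], cont_names=[], label_name=["Post"])
# a single multi-column group with joint encoding: one vocabulary for both columns
workflow.add_preprocess(ops.Categorify(columns=[["Author", "Engaging User"]], encode_type="joint"))
workflow.finalize()
workflow.apply(nvt.Dataset(df), output_format=None)
df_out = workflow.get_ddf().compute()
```
In the new tests the encoded ids start at 1 (0 is reserved as the null/sentinel category), so this should reproduce the table above shifted by one.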
| 2020-07-30T21:36:42 |
|
NVIDIA-Merlin/NVTabular | 190 | NVIDIA-Merlin__NVTabular-190 | [
"149"
] | e64cfda2d1e7229d8beddcdc2d75a888a90d2459 | diff --git a/nvtabular/torch_dataloader.py b/nvtabular/torch_dataloader.py
--- a/nvtabular/torch_dataloader.py
+++ b/nvtabular/torch_dataloader.py
@@ -29,7 +29,6 @@
class TensorItr:
"""
Tensor dataset, for data already in tensor format.
- (see preproc::ds_to_tensor)
Parameters
-----------
@@ -44,7 +43,7 @@ def __init__(self, tensors, batch_size=1, pin_memory=False, shuffle=False):
self.tensors = tensors
self.batch_size = batch_size
- self.num_samples = self.tensors[0].size(0)
+ self.num_samples = self.tensors[2].size(0)
if shuffle:
self.shuffle()
@@ -60,9 +59,10 @@ def __len__(self):
def __iter__(self):
for idx in range(0, self.num_samples, self.batch_size):
- tens = [tensor[idx : idx + self.batch_size] for tensor in self.tensors]
- yield tens[0], tens[1], tens[2]
- del tens
+ yield [
+ tensor[idx : idx + self.batch_size] if tensor is not None else None
+ for tensor in self.tensors
+ ]
def shuffle(self):
idx = torch.randperm(self.num_samples, dtype=torch.int64)
@@ -349,9 +349,11 @@ def gather_indices(self):
return self.indices[start : start + per_worker]
def _to_tensor(self, gdf, dtype=None):
+ if gdf.empty:
+ return
dl_pack = self.to_dlpack(gdf)
tens = from_dlpack(dl_pack).type(dtype)
- return tens, gdf.columns, dtype
+ return tens
def create_tensors(self, gdf, cat_names=None, cont_names=None, label_names=None):
gdf_cats, gdf_conts, gdf_label = (
@@ -360,14 +362,11 @@ def create_tensors(self, gdf, cat_names=None, cont_names=None, label_names=None)
gdf[label_names],
)
del gdf
- if len(gdf_cats) > 0:
- cats = self._to_tensor(gdf_cats, torch.long)
- if len(gdf_conts) > 0:
- conts = self._to_tensor(gdf_conts, torch.float32)
- if len(gdf_label) > 0:
- label = self._to_tensor(gdf_label, torch.float32)
+ cats = self._to_tensor(gdf_cats, torch.long)
+ conts = self._to_tensor(gdf_conts, torch.float32)
+ label = self._to_tensor(gdf_label, torch.float32)
del gdf_cats, gdf_conts, gdf_label
- return [cats[0], conts[0], label[0]]
+ return [cats, conts, label]
class DLDataLoader(torch.utils.data.DataLoader):
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -527,11 +527,6 @@ def _set_stats(self, stats_dict):
def clear_stats(self):
self.stats = {}
- def ds_to_tensors(self, itr, apply_ops=True):
- from nvtabular.torch_dataloader import create_tensors
-
- return create_tensors(self, itr=itr, apply_ops=apply_ops)
-
def get_new_config():
"""
| diff --git a/tests/unit/test_torch_dataloader.py b/tests/unit/test_torch_dataloader.py
--- a/tests/unit/test_torch_dataloader.py
+++ b/tests/unit/test_torch_dataloader.py
@@ -41,6 +41,23 @@ def test_gpu_file_iterator_ds(df, dataset, batch, engine):
assert_eq(df_itr.reset_index(drop=True), df.reset_index(drop=True))
[email protected]("engine", ["parquet"])
+def test_empty_cols(tmpdir, df, dataset, engine):
+ # test out https://github.com/NVIDIA/NVTabular/issues/149 making sure we can iterate over
+ # empty cats/conts
+ # first with no continuous columns
+ no_conts = torch_dataloader.AsyncTensorBatchDatasetItr(
+ dataset, cats=["id"], conts=[], labels=["label"], batch_size=1
+ )
+ assert all(conts is None for _, conts, _ in no_conts)
+
+ # and with no categorical columns
+ no_cats = torch_dataloader.AsyncTensorBatchDatasetItr(
+ dataset, cats=[], conts=["x"], labels=["label"]
+ )
+ assert all(cats is None for cats, _, _ in no_cats)
+
+
@pytest.mark.parametrize("part_mem_fraction", [0.000001, 0.1])
@pytest.mark.parametrize("batch_size", [1, 10, 100])
@pytest.mark.parametrize("engine", ["parquet"])
[BUG] proc.ds_to_tensors does not support empty category/numeric columns
**Describe the bug**
proc.ds_to_tensors expects that categorical/numerical columns are defined.
I think there are datasets,
* which does not have a categorical column - e.g. all columns are numeric.
* which does not have a numeric column - e.g. all columns are categorical
**Steps/Code to reproduce bug**
```
dataset = nvt.Dataset('./train.csv')
proc = nvt.Workflow(
cat_names=['ROLE_ROLLUP_1'],
cont_names=[],
label_name=['ACTION']
)
proc.finalize()
proc.ds_to_tensors(dataset.to_iter())
```
Throws following error:
```
/opt/conda/envs/rapids/lib/python3.6/site-packages/nvtabular/torch_dataloader.py in _get_final_cols(preproc)
154 preproc.create_final_cols()
155 cat_names = _get_embedding_order(preproc.columns_ctx["final"]["cols"]["categorical"])
--> 156 cont_names = sorted(preproc.columns_ctx["final"]["cols"]["continuous"])
157 label_name = sorted(preproc.columns_ctx["final"]["cols"]["label"])
158 return cat_names, cont_names, label_name
TypeError: 'NoneType' object is not iterable
```
If I run the following instead, then it works.
```
dataset = nvt.Dataset('./train.csv')
proc = nvt.Workflow(
cat_names=['ROLE_ROLLUP_1'],
cont_names=['ROLE_ROLLUP_1'],
label_name=['ACTION']
)
proc.finalize()
proc.ds_to_tensors(dataset.to_iter())
```
I know that the error is not related to dataset.to_iter() - this works - it is related to proc.ds_to_tensors.
**Expected behavior**
I think the expected behavior should be that it supports empty lists for categorical and numerical features
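For reference, a sketch of the behaviour the fix enables, adapted from the new `test_empty_cols` unit test; the file and column names are taken from the reproduction above, and the missing continuous tensor simply comes back as `None`:
```
import nvtabular as nvt
from nvtabular import torch_dataloader

dataset = nvt.Dataset("./train.csv")

# no continuous columns: the conts tensor is None for every batch
data_itr = torch_dataloader.AsyncTensorBatchDatasetItr(
    dataset, cats=["ROLE_ROLLUP_1"], conts=[], labels=["ACTION"], batch_size=1
)
for cats, conts, labels in data_itr:
    assert conts is None
```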
| 2020-07-31T22:27:29 |
|
NVIDIA-Merlin/NVTabular | 198 | NVIDIA-Merlin__NVTabular-198 | [
"197"
] | 5dba81f5bb83e0a67f30b34e442943b0300dce88 | diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -295,7 +295,7 @@ def add_data(self, gdf):
# path is probably worth the (possible) minor overhead.
nrows = gdf.shape[0]
typ = np.min_scalar_type(nrows * 2)
- if self.shuffle and self.shuffle != "full":
+ if self.shuffle:
ind = cp.random.choice(cp.arange(self.num_out_files, dtype=typ), nrows)
else:
ind = cp.arange(nrows, dtype=typ)
| diff --git a/tests/unit/test_torch_dataloader.py b/tests/unit/test_torch_dataloader.py
--- a/tests/unit/test_torch_dataloader.py
+++ b/tests/unit/test_torch_dataloader.py
@@ -74,7 +74,7 @@ def test_gpu_dl(tmpdir, df, dataset, batch_size, part_mem_fraction, engine):
nvt_data = nvt.Dataset(tar_paths[0], engine="parquet", part_mem_fraction=part_mem_fraction)
data_itr = nvt.torch_dataloader.AsyncTensorBatchDatasetItr(
- nvt_data, batch_size=batch_size, cats=cat_names, conts=cont_names, labels=["label"],
+ nvt_data, batch_size=batch_size, cats=cat_names, conts=cont_names, labels=["label"]
)
columns = mycols_pq
@@ -124,11 +124,7 @@ def test_kill_dl(tmpdir, df, dataset, part_mem_fraction, engine):
os.mkdir(output_train)
processor.apply(
- dataset,
- apply_offline=True,
- record_stats=True,
- shuffle="partial",
- output_path=output_train,
+ dataset, apply_offline=True, record_stats=True, shuffle="partial", output_path=output_train
)
tar_paths = [
@@ -138,7 +134,7 @@ def test_kill_dl(tmpdir, df, dataset, part_mem_fraction, engine):
nvt_data = nvt.Dataset(tar_paths[0], engine="parquet", part_mem_fraction=part_mem_fraction)
data_itr = nvt.torch_dataloader.AsyncTensorBatchDatasetItr(
- nvt_data, cats=cat_names, conts=cont_names, labels=["label"],
+ nvt_data, cats=cat_names, conts=cont_names, labels=["label"]
)
results = {}
| [BUG] Rossmann notebook not hitting expected RMSPE (again)
**Describe the bug**
Rossmann convergence and final RMSPEs are again considerably worse than they once were.
I'm separating this out from https://github.com/NVIDIA/NVTabular/issues/146 because that issue was resolved by @rjzamora 's PR (I confirmed this, as described below).
**Steps/Code to reproduce bug**
First, rewind to @rjzamora 's PR, which (for reasons that are right now unknown) fixed https://github.com/NVIDIA/NVTabular/issues/146:
```
# Align Dask and Single-GPU Writer Logic (#160)
git checkout 7407cfd
```
Run `examples/rossmann-store-sales-preproc.ipynb`
Run `examples/rossmann-store-sales-example.ipynb`
Here we see consistent convergence and good final RMSPEs, confirming the fix.
Note: NVTabular's `Workflow` outputs are now saved in `examples/data/jp_ross`
Now fast forward to master as of 2020 08 04:
```
# [REVIEW] Async torch Dataloaders (#127)
git checkout 7935f7e
```
Note: `examples/rossmann-store-sales-preproc.ipynb` is completely unchanged from `7407cfd ` to `7935f7e`.
If we run `examples/rossmann-store-sales-example.ipynb` 3 times, we now 1. see unstable convergence and 2. obtain final RMSPEs of
TensorFlow: 25.0%, 22.3%, 22.3%
fast.ai: 29.9%, 29.1%, 21.5%
**The problem seems to have to do with `Workflow` processing.**
Note that the newer version of `examples/rossmann-store-sales-example.ipynb` does not use `examples/data/jp_ross` for exporting `Workflow` data, but rather `examples/data/ross_pre`.
So, we can now run this notebook exactly as is but using `7407cfd `'s `Workflow` outputs instead. This was done by inserting
```
PREPROCESS_DIR = os.path.join(DATA_DIR, 'jp_ross')
PREPROCESS_DIR_TRAIN = os.path.join(PREPROCESS_DIR, 'train')
PREPROCESS_DIR_VALID = os.path.join(PREPROCESS_DIR, 'valid')
```
right before the Training a Network section.
Now, if we rerun the notebook 3 times, we once again get stable convergence, and the final RMSPEs are
```
TensorFlow: 18.9%, 17.4%, 17.9%
fast.ai: 19.7%, 19.5%, 21.4%
```
@benfred @rjzamora @jperez999 for visibility
| I haven't thought through this issue yet, but it may be worth confirming that #196 (recently merged) doesn't fix things. We discovered that there is a bug in cudf that is resulting in bad parquet data when slicing string columns (which we are doing when splitting the output data between multiple files).
[**EDIT**: This does **not** seem to fix the issue for me.]
Ok – I tried, too. Same here: problem not resolved as of `5a2d53d`.
Can you use ```git bisect``` to figure out where this started to break? | 2020-08-06T04:02:25 |
NVIDIA-Merlin/NVTabular | 203 | NVIDIA-Merlin__NVTabular-203 | [
"3"
] | 6750183d3eb56bc6ec3e7e4ea579102c27f2e7b8 | diff --git a/examples/dask-nvtabular-criteo-benchmark.py b/examples/dask-nvtabular-criteo-benchmark.py
--- a/examples/dask-nvtabular-criteo-benchmark.py
+++ b/examples/dask-nvtabular-criteo-benchmark.py
@@ -114,7 +114,7 @@ def main(args):
processor = Workflow(
cat_names=cat_names, cont_names=cont_names, label_name=label_name, client=client
)
- processor.add_feature([ops.ZeroFill(), ops.LogOp()])
+ processor.add_feature([ops.FillMissing(), ops.Clip(min_value=0), ops.LogOp()])
processor.add_preprocess(
ops.Categorify(
out_path=out_path,
diff --git a/nvtabular/ops.py b/nvtabular/ops.py
--- a/nvtabular/ops.py
+++ b/nvtabular/ops.py
@@ -336,26 +336,38 @@ def clear(self):
return
-class ZeroFill(TransformOperator):
+class Clip(TransformOperator):
"""
- This operation sets negative values to zero.
-
- Although you can directly call methods of this class to
- transform your continuous features, it's typically used within a
- Workflow class.
+ This operation clips continuous values so that they are within a min/max bound.
+ For instance by setting the min value to 0, you can replace all negative values with 0.
+ This is helpful in cases where you want to log normalize values.
"""
default_in = CONT
default_out = CONT
- @annotate("ZeroFill_op", color="darkgreen", domain="nvt_python")
+ def __init__(
+ self, min_value=None, max_value=None, columns=None, preprocessing=True, replace=True
+ ):
+ if min_value is None and max_value is None:
+ raise ValueError("Must specify a min or max value to clip to")
+ super().__init__(columns=columns, preprocessing=preprocessing, replace=replace)
+ self.min_value = min_value
+ self.max_value = max_value
+
+ @annotate("Clip_op", color="darkgreen", domain="nvt_python")
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
cont_names = target_columns
if not cont_names:
return gdf
- z_gdf = gdf[cont_names].fillna(0)
+
+ z_gdf = gdf[cont_names]
z_gdf.columns = [f"{col}_{self._id}" for col in z_gdf.columns]
- z_gdf[z_gdf < 0] = 0
+ if self.min_value is not None:
+ z_gdf[z_gdf < self.min_value] = self.min_value
+ if self.max_value is not None:
+ z_gdf[z_gdf > self.max_value] = self.max_value
+
return z_gdf
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -150,7 +150,7 @@ def add_cont_feature(self, operators):
Parameters
-----------
operators : object
- continuous objects such as ZeroFill and LogOp
+ continuous objects such as FillMissing, Clip and LogOp
"""
operators = self.op_default_check(operators, "continuous")
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -75,7 +75,7 @@ def test_dask_workflow_api_dlrm(
client=client, cat_names=cat_names, cont_names=cont_names, label_name=label_name
)
- processor.add_feature([ops.ZeroFill(), ops.LogOp()])
+ processor.add_feature([ops.FillMissing(), ops.Clip(min_value=0), ops.LogOp()])
processor.add_preprocess(
ops.Categorify(
freq_threshold=freq_threshold,
diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -119,7 +119,13 @@ def test_hugectr(
processor = nvt.Workflow(
client=client, cat_names=cat_names, cont_names=cont_names, label_name=label_names
)
- processor.add_feature([ops.ZeroFill(columns=op_columns), ops.LogOp()])
+ processor.add_feature(
+ [
+ ops.FillMissing(columns=op_columns),
+ ops.Clip(min_value=0, columns=op_columns),
+ ops.LogOp(),
+ ]
+ )
processor.add_preprocess(ops.Normalize())
processor.add_preprocess(ops.Categorify())
processor.finalize()
diff --git a/tests/unit/test_s3.py b/tests/unit/test_s3.py
--- a/tests/unit/test_s3.py
+++ b/tests/unit/test_s3.py
@@ -56,7 +56,7 @@ def test_s3_dataset(s3, paths, engine, df):
processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name,)
- processor.add_feature([ops.ZeroFill(), ops.LogOp()])
+ processor.add_feature([ops.FillMissing(), ops.Clip(min_value=0), ops.LogOp()])
processor.add_preprocess(ops.Normalize())
processor.add_preprocess(ops.Categorify(cat_cache="host"))
processor.finalize()
diff --git a/tests/unit/test_workflow.py b/tests/unit/test_workflow.py
--- a/tests/unit/test_workflow.py
+++ b/tests/unit/test_workflow.py
@@ -50,7 +50,9 @@ def test_gpu_workflow_api(
client=client if use_client else None,
)
- processor.add_feature([ops.ZeroFill(columns=op_columns), ops.LogOp()])
+ processor.add_feature(
+ [ops.FillMissing(), ops.Clip(min_value=0, columns=op_columns), ops.LogOp()]
+ )
processor.add_preprocess(ops.Normalize())
processor.add_preprocess(ops.Categorify(cat_cache="host"))
processor.finalize()
@@ -69,7 +71,7 @@ def get_norms(tar: cudf.Series):
gdf = np.log(gdf + 1)
return gdf
- # Check mean and std - No good right now we have to add all other changes; Zerofill, Log
+ # Check mean and std - No good right now we have to add all other changes; Clip, Log
if not op_columns:
assert math.isclose(get_norms(df.y).mean(), processor.stats["means"]["y"], rel_tol=1e-1)
@@ -120,8 +122,8 @@ def test_gpu_workflow(tmpdir, client, df, dataset, gpu_memory_frac, engine, dump
label_name = ["label"]
config = nvt.workflow.get_new_config()
- config["FE"]["continuous"] = [ops.ZeroFill()]
- config["PP"]["continuous"] = [[ops.ZeroFill(), ops.Normalize()]]
+ config["FE"]["continuous"] = [ops.FillMissing(), ops.Clip(min_value=0)]
+ config["PP"]["continuous"] = [[ops.FillMissing(), ops.Clip(min_value=0), ops.Normalize()]]
config["PP"]["categorical"] = [ops.Categorify()]
processor = nvt.Workflow(
@@ -146,12 +148,8 @@ def get_norms(tar: cudf.Series):
assert math.isclose(get_norms(df.x).mean(), processor.stats["means"]["x"], rel_tol=1e-4)
assert math.isclose(get_norms(df.y).mean(), processor.stats["means"]["y"], rel_tol=1e-4)
- # assert math.isclose(get_norms(df.id).mean(),
- # processor.stats["means"]["id_ZeroFill_LogOp"], rel_tol=1e-4)
assert math.isclose(get_norms(df.x).std(), processor.stats["stds"]["x"], rel_tol=1e-3)
assert math.isclose(get_norms(df.y).std(), processor.stats["stds"]["y"], rel_tol=1e-3)
- # assert math.isclose(get_norms(df.id).std(),
- # processor.stats["stds"]["id_ZeroFill_LogOp"], rel_tol=1e-3)
# Check that categories match
if engine == "parquet":
@@ -218,7 +216,7 @@ def get_norms(tar: cudf.Series):
gdf = np.log(gdf + 1)
return gdf
- # Check mean and std - No good right now we have to add all other changes; Zerofill, Log
+ # Check mean and std - No good right now we have to add all other changes; Clip, Log
concat_ops = "_FillMissing_LogOp"
if replace:
| Decoupling ZeroFill from Relu op
Currently the ZeroFill op also implicitly does something akin to a Relu op, i.e. replacing negative values with 0 via `z_gdf[z_gdf < 0] = 0`. I think these two behaviours should be decoupled, and Relu should be made into an explicit op.
```
class ZeroFill(TransformOperator):
default_in = CONT
default_out = CONT
@annotate("ZeroFill_op", color="darkgreen", domain="nvt_python")
def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
cont_names = target_columns
if not cont_names:
return gdf
z_gdf = gdf[cont_names].fillna(0)
z_gdf.columns = [f"{col}_{self._id}" for col in z_gdf.columns]
z_gdf[z_gdf < 0] = 0
return z_gdf
```
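For reference, a minimal sketch of the decoupled pattern this PR settles on (a `Clip` op with an explicit `min_value`, mirroring the updated benchmark and tests); the column lists below are placeholders for illustration only:
```
import nvtabular as nvt
from nvtabular import ops

# placeholder column lists for illustration
cat_names, cont_names, label_name = ["C1"], ["I1"], ["label"]

workflow = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)
# the two behaviours of the old ZeroFill, requested explicitly:
# fill nulls, then clamp negative values to zero before the log transform
workflow.add_feature([ops.FillMissing(), ops.Clip(min_value=0), ops.LogOp()])
```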
| 2020-08-08T02:17:37 |
|
NVIDIA-Merlin/NVTabular | 227 | NVIDIA-Merlin__NVTabular-227 | [
"175"
] | 70797f9e572bb3a41d1d8f18de70e0abe6346e93 | diff --git a/nvtabular/io.py b/nvtabular/io.py
--- a/nvtabular/io.py
+++ b/nvtabular/io.py
@@ -131,10 +131,16 @@ def _merge_general_metadata(meta_list):
meta = None
for md in meta_list:
if meta:
- meta["data_paths"] += md["data_paths"]
- meta["file_stats"] += md["file_stats"]
+ if "data_paths" in md:
+ meta["data_paths"] += md["data_paths"]
+ if "file_stats" in md:
+ meta["file_stats"] += md["file_stats"]
else:
meta = md.copy()
+ if "data_paths" not in meta:
+ meta["data_paths"] = []
+ if "file_stats" not in meta:
+ meta["file_stats"] = []
return meta
| diff --git a/tests/conftest.py b/tests/conftest.py
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,5 @@
import glob
+import os
import random
import cudf
@@ -41,6 +42,7 @@
]
_CLIENT = None
+_CUDA_CLUSTER = None
@pytest.fixture(scope="session")
@@ -51,6 +53,18 @@ def client():
return _CLIENT
[email protected](scope="session")
+def cuda_cluster():
+ from dask_cuda import LocalCUDACluster
+
+ global _CUDA_CLUSTER
+ if _CUDA_CLUSTER is None:
+ CUDA_VISIBLE_DEVICES = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
+ n_workers = min(2, len(CUDA_VISIBLE_DEVICES.split(",")))
+ _CUDA_CLUSTER = LocalCUDACluster(n_workers=n_workers)
+ return _CUDA_CLUSTER
+
+
@pytest.fixture(scope="session")
def datasets(tmpdir_factory):
df = cudf.datasets.timeseries(
diff --git a/tests/unit/test_notebooks.py b/tests/unit/test_notebooks.py
--- a/tests/unit/test_notebooks.py
+++ b/tests/unit/test_notebooks.py
@@ -49,6 +49,27 @@ def test_rossman_example(tmpdir):
_run_notebook(tmpdir, notebook_path, lambda line: line.replace("EPOCHS = 25", "EPOCHS = 1"))
+def test_multigpu_dask_example(tmpdir, cuda_cluster):
+ pytest.importorskip("dask_cuda")
+ os.environ["BASE_DIR"] = str(tmpdir)
+ scheduler_port = cuda_cluster.scheduler_address
+
+ def _nb_modify(line):
+ # Use cuda_cluster "fixture" port rather than allowing notebook
+ # to deploy a LocalCUDACluster within the subprocess
+ line = line.replace("cluster = None", f"cluster = '{scheduler_port}'")
+ # Use a much smaller "toy" dataset
+ line = line.replace("write_count = 25", "write_count = 4")
+ line = line.replace('freq = "1s"', 'freq = "1h"')
+ # Use smaller partitions for smaller dataset
+ line = line.replace("part_mem_fraction=0.1", "part_size=1_000_000")
+ line = line.replace("out_files_per_proc=8", "out_files_per_proc=1")
+ return line
+
+ notebook_path = os.path.join(dirname(TEST_PATH), "examples", "multi-gpu_dask.ipynb")
+ _run_notebook(tmpdir, notebook_path, _nb_modify)
+
+
def _run_notebook(tmpdir, notebook_path, transform=None):
# read in the notebook as JSON, and extract a python script from it
notebook = json.load(open(notebook_path))
| [DOC] Add dask-cudf instructions to documentation
We should add documentation on how to use the multi-gpu version of nvtabular.
We have a [benchmark](https://www.reviewnb.com/) using a dask client for multi-gpu already. We should export a simplified view of this as an example notebook, and link that notebook into the sphinx docs.
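Until that notebook exists, a minimal sketch of the pattern the Criteo benchmark already uses: deploy a `LocalCUDACluster` (one Dask worker per GPU) and pass the client into the `Workflow`. The column lists below are placeholders, not part of the eventual docs:
```
import nvtabular as nvt
from dask.distributed import Client
from dask_cuda import LocalCUDACluster

cluster = LocalCUDACluster()  # one worker per visible GPU
client = Client(cluster)

cat_names, cont_names, label_name = ["C1"], ["I1"], ["label"]  # placeholders
workflow = nvt.Workflow(
    cat_names=cat_names, cont_names=cont_names, label_name=label_name, client=client
)
```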
| 2020-08-19T21:00:01 |
|
NVIDIA-Merlin/NVTabular | 231 | NVIDIA-Merlin__NVTabular-231 | [
"177"
] | 0a6c0a5ad6368a88b557217a7f212d5737a96334 | diff --git a/nvtabular/categorify.py b/nvtabular/categorify.py
--- a/nvtabular/categorify.py
+++ b/nvtabular/categorify.py
@@ -35,8 +35,12 @@ def _make_name(*args, sep="_"):
@annotate("top_level_groupby", color="green", domain="nvt_python")
def _top_level_groupby(
- gdf, cat_col_groups, tree_width, cont_cols, sum_sq, on_host, concat_groups, name_sep
+ gdf, cat_col_groups, tree_width, cont_cols, agg_list, on_host, concat_groups, name_sep
):
+ sum_sq = "std" in agg_list or "var" in agg_list
+ calculate_min = "min" in agg_list
+ calculate_max = "max" in agg_list
+
# Top-level operation for category-based groupby aggregations
output = {}
k = 0
@@ -67,6 +71,11 @@ def _top_level_groupby(
df_gb[name] = df_gb[col].pow(2)
agg_dict[name] = ["sum"]
+ if calculate_min:
+ agg_dict[col].append("min")
+ if calculate_max:
+ agg_dict[col].append("max")
+
# Perform groupby and flatten column index
# (flattening provides better cudf support)
gb = df_gb.groupby(cat_col_group, dropna=False).agg(agg_dict)
@@ -103,11 +112,11 @@ def _mid_level_groupby(
if concat_groups and len(col_group) > 1:
col_group = [_make_name(*col_group, sep=name_sep)]
- ignore_index = True
+ df = _concat(dfs, ignore_index=True)
if on_host:
- gb = cudf.from_pandas(_concat(dfs, ignore_index)).groupby(col_group, dropna=False).sum()
- else:
- gb = _concat(dfs, ignore_index).groupby(col_group, dropna=False).sum()
+ df = cudf.from_pandas(df)
+ groups = df.groupby(col_group, dropna=False)
+ gb = groups.agg({col: _get_aggregation_type(col) for col in df.columns if col not in col_group})
gb.reset_index(drop=False, inplace=True)
name_count = _make_name(*(col_group + ["count"]), sep=name_sep)
@@ -129,6 +138,14 @@ def _mid_level_groupby(
required.append(name_mean)
gb[name_mean] = gb[name_sum] / gb[name_count]
+ if "min" in agg_list:
+ name_min = _make_name(*(col_group + [cont_col, "min"]), sep=name_sep)
+ required.append(name_min)
+
+ if "max" in agg_list:
+ name_max = _make_name(*(col_group + [cont_col, "max"]), sep=name_sep)
+ required.append(name_max)
+
if "var" in agg_list or "std" in agg_list:
n = gb[name_count]
x = gb[name_sum]
@@ -155,6 +172,15 @@ def _mid_level_groupby(
return gb[required]
+def _get_aggregation_type(col):
+ if col.endswith("_min"):
+ return "min"
+ elif col.endswith("_max"):
+ return "max"
+ else:
+ return "sum"
+
+
@annotate("write_gb_stats", color="green", domain="nvt_python")
def _write_gb_stats(dfs, base_path, col_group, on_host, concat_groups, name_sep):
if concat_groups and len(col_group) > 1:
@@ -275,7 +301,7 @@ def _groupby_to_disk(
col_groups,
tree_width,
agg_cols,
- ("std" in agg_list or "var" in agg_list),
+ agg_list,
on_host,
concat_groups,
name_sep,
diff --git a/nvtabular/ops.py b/nvtabular/ops.py
--- a/nvtabular/ops.py
+++ b/nvtabular/ops.py
@@ -636,7 +636,7 @@ class GroupbyStatistics(StatOperator):
List of statistics to calculate for each unique group. Note
that "count" corresponds to the group itself, while all
other statistics correspond to a specific continuous column.
- Supported statistics include ["count", "sum", "mean", "std", "var"].
+ Supported statistics include ["count", "sum", "mean", "std", "var", "min", "max"].
columns : list of str or list(str), default None
Categorical columns (or "column groups") to collect statistics for.
If None, the operation will target all known categorical columns.
@@ -709,7 +709,7 @@ def _id(self):
def stat_logic(self, ddf, columns_ctx, input_cols, target_cols):
col_groups = self.column_groups or self.get_columns(columns_ctx, input_cols, target_cols)
- supported_ops = ["count", "sum", "mean", "std", "var"]
+ supported_ops = ["count", "sum", "mean", "std", "var", "min", "max"]
for op in self.stats:
if op not in supported_ops:
raise ValueError(op + " operation is not supported.")
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -147,7 +147,9 @@ def test_dask_groupby_stats(client, tmpdir, datasets, part_mem_fraction):
)
processor.add_preprocess(
- ops.JoinGroupby(cont_names=cont_names, stats=["count", "sum", "std"], out_path=str(tmpdir))
+ ops.JoinGroupby(
+ cont_names=cont_names, stats=["count", "sum", "std", "min"], out_path=str(tmpdir)
+ )
)
processor.finalize()
@@ -173,6 +175,16 @@ def test_dask_groupby_stats(client, tmpdir, datasets, part_mem_fraction):
check_names=False,
)
+ # Check "min"
+ assert_eq(
+ result[["name-string", "name-string_x_min"]]
+ .drop_duplicates()
+ .sort_values("name-string")["name-string_x_min"],
+ df0.groupby("name-string").agg({"x": "min"})["x"],
+ check_index=False,
+ check_names=False,
+ )
+
# Check "std"
assert_eq(
result[["name-string", "name-string_x_std"]]
| [OP] Add min/max support to groupby op
We want to be able to calculate the minimum timestamp of an item from each interaction. This minimum timestamp will be used as the age of the item. The groupby op can currently compute several statistics like mean, std, and var, but needs to be extended to support min/max to be able to calculate this (see the sketch below).
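A minimal sketch of how this could look once min is supported in the groupby stats, based on the `JoinGroupby` usage in the new tests; the `item_id`/`timestamp`/`label` column names are illustrative assumptions:
```
import nvtabular as nvt
from nvtabular import ops

# illustrative columns: one categorical item id, one continuous timestamp
workflow = nvt.Workflow(cat_names=["item_id"], cont_names=["timestamp"], label_name=["label"])

# join each item's minimum interaction timestamp back onto the rows,
# which can then be used to derive the item's age
workflow.add_preprocess(
    ops.JoinGroupby(columns=["item_id"], cont_names=["timestamp"], stats=["min"])
)
```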
| @benfred this is actually required by Outbrain also. | 2020-08-21T02:48:51 |
NVIDIA-Merlin/NVTabular | 253 | NVIDIA-Merlin__NVTabular-253 | [
"117"
] | 56573e8770947fc0f0e48672b97785a7d67f42a4 | diff --git a/nvtabular/ops.py b/nvtabular/ops.py
--- a/nvtabular/ops.py
+++ b/nvtabular/ops.py
@@ -1600,3 +1600,61 @@ def apply_op(
new_gdf = self.f(gdf)
new_gdf.reset_index(drop=True, inplace=True)
return new_gdf
+
+
+class DifferenceLag(TransformOperator):
+ """ Calculates the difference between two consecutive rows of the dataset. For instance, this
+ operator can calculate the time since a user last had another interaction.
+
+ This requires a dataset partitioned by one set of columns (userid) and sorted further by another
+ set (userid, timestamp). The dataset must already be partitioned and sorted before being passed
+ to the workflow. This can be easily done using dask-cudf::
+
+ # get a nvt dataset and convert to a dask dataframe
+ ddf = nvtabular.Dataset(PATHS).to_ddf()
+
+ # partition the dask dataframe by userid, then sort by userid/timestamp
+ ddf = ddf.shuffle("userid").sort_values(["userid", "timestamp"])
+
+ # create a new nvtabular dataset on the partitioned/sorted values
+ dataset = nvtabular.Dataset(ddf)
+
+ Once passed an appropriate dataset, this operator can be added to a nvtabular workflow to
+ compute the lagged difference within a partition::
+
+ # compute the delta in timestamp for each users session
+ workflow.add_feature(DifferenceLag("userid', columns=["timestamp"]))
+
+ Parameters
+ -----------
+ partition_cols : str or list of str
+ Column or Columns that are used to partition the data.
+ shift : int, default 1
+ The number of rows to look backwards when computing the difference lag. Negative values
+ indicate the number of rows to look forwards, making this compute the lead instead of lag.
+ columns :
+ replace: bool, default False
+ Whether to replace existing columns or create new ones
+ """
+
+ default_in = CONT
+ default_out = CONT
+
+ def __init__(self, partition_cols, shift=1, columns=None, replace=False):
+ super(DifferenceLag, self).__init__(columns=columns, replace=replace)
+ self.partition_cols = partition_cols
+ self.shift = shift
+
+ @annotate("DifferenceLag_op", color="darkgreen", domain="nvt_python")
+ def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None):
+ # compute a mask indicating partition boundaries, handling multiple partition_cols
+ # represent partition boundaries by None values
+ mask = gdf[self.partition_cols] == gdf[self.partition_cols].shift(self.shift)
+ if isinstance(mask, cudf.DataFrame):
+ mask = mask.all(axis=1)
+ mask[mask == False] = None # noqa
+
+ output = {}
+ for col in target_columns:
+ output[f"{col}_{self._id}"] = (gdf[col] - gdf[col].shift(self.shift)) * mask
+ return cudf.DataFrame(output)
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -876,3 +876,22 @@ def test_filter(tmpdir, df, dataset, gpu_memory_frac, engine, client):
new_gdf = filter_op.apply_op(df, columns_ctx, "all", target_cols=columns)
assert new_gdf.columns.all() == df.columns.all()
assert new_gdf.shape[0] < df.shape[0], "null values do not exist"
+
+
+def test_difference_lag():
+ df = cudf.DataFrame(
+ {"userid": [0, 0, 0, 1, 1, 2], "timestamp": [1000, 1005, 1100, 2000, 2001, 3000]}
+ )
+
+ columns = ["userid", "timestamp"]
+ columns_ctx = {}
+ columns_ctx["all"] = {}
+ columns_ctx["all"]["base"] = columns
+
+ op = ops.DifferenceLag("userid", columns=["timestamp"])
+ new_gdf = op.apply_op(df, columns_ctx, "all", target_cols=["timestamp"])
+
+ assert new_gdf["timestamp_DifferenceLag"][0] is None
+ assert new_gdf["timestamp_DifferenceLag"][1] == 5
+ assert new_gdf["timestamp_DifferenceLag"][2] == 95
+ assert new_gdf["timestamp_DifferenceLag"][3] is None
| [OP] DifferenceLag
**Is your feature request related to a problem? Please describe.**
DifferenceLag calculates the difference between the current row value and the previous/next row value.
**Describe the solution you'd like**
DifferenceLag Logic is following:
1. sorts the data frame given a key column
2. shifts the to transformed column n steps (positive n shifts forward, negative n shits backward
3. calculates the difference between the current value and the shifted value
DifferenceLag can be applied with a category column - meaning that differences are only calculated with the current value and shifted value have the same category. Kind of groupby version.
**Additional context**
Implementation examples:
https://github.com/rapidsai/recsysChallenge2020/blob/master/Baselines/DASK-XGB/Demo-e2e-multi-CPU.ipynb : diff_encode_cudf_v1
https://github.com/rapidsai/recsysChallenge2020/blob/master/final_models/XGB1334/XGB1334-submit-singleGPU.ipynb : diff_encode_cudf_v1
| 2020-08-29T05:30:10 |
|
NVIDIA-Merlin/NVTabular | 280 | NVIDIA-Merlin__NVTabular-280 | [
"276"
] | cfed87a56e6a5460e241673219808f0a3508cf05 | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -38,8 +38,93 @@
class Dataset:
- """Dask-based Dataset Class
- Converts a dataset into a dask_cudf DataFrame on demand
+ """Universal external-data wrapper for NVTabular
+
+ The NVTabular `Workflow` and `DataLoader`-related APIs require all
+ external data to be converted to the universal `Dataset` type. The
+ main purpose of this class is to abstract away the raw format of the
+ data, and to allow other NVTabular classes to reliably materialize a
+ `dask_cudf.DataFrame` collection (and/or collection-based iterator)
+ on demand.
+
+ A new `Dataset` object can be initialized from a variety of different
+ raw-data formats. To initialize an object from a directory path or
+ file list, the `engine` argument should be used to specify either
+ "parquet" or "csv" format. If the first argument contains a list
+ of files with a suffix of either "parquet" or "csv", the engine can
+ be inferred::
+
+ # Initialize Dataset with a parquet-dataset directory.
+ # must specify engine="parquet"
+ dataset = Dataset("/path/to/data_pq", engine="parquet")
+
+ # Initialize Dataset with list of csv files.
+ # engine="csv" argument is optional
+ dataset = Dataset(["file_0.csv", "file_1.csv"])
+
+ Since NVTabular leverages `fsspec` as a file-system interface,
+ the underlying data can be stored either locally, or in a remote/cloud
+ data store. To read from remote storage, like gds or s3, the
+ appropriate protocol should be prepended to the `Dataset` path
+ argument(s), and any special backend parameters should be passed
+ in a `storage_options` dictionary::
+
+ # Initialize Dataset with s3 parquet data
+ dataset = Dataset(
+ "s3://bucket/path",
+ engine="parquet",
+ storage_options={'anon': True, 'use_ssl': False},
+ )
+
+ By default, both parquet and csv-based data will be converted to
+ a Dask-DataFrame collection with a maximum partition size of
+ roughly 12.5 percent of the total memory on a single device. The
+ partition size can be changed to a different fraction of total
+ memory on a single device with the `part_mem_fraction` argument.
+ Alternatively, a specific byte size can be specified with the
+ `part_size` argument::
+
+ # Dataset partitions will be ~10% single-GPU memory (or smaller)
+ dataset = Dataset("bigfile.parquet", part_mem_fraction=0.1)
+
+ # Dataset partitions will be ~1GB (or smaller)
+ dataset = Dataset("bigfile.parquet", part_size="1GB")
+
+ Note that, if both the fractional and literal options are used
+ at the same time, `part_size` will take precedence. Also, for
+ parquet-formatted data, the partitioning is done at the row-
+ group level, and the byte-size of the first row-group (after
+ CuDF conversion) is used to map all other partitions.
+ Therefore, if the distribution of row-group sizes is not
+ uniform, the partition sizes will not be balanced.
+
+ In addition to handling data stored on disk, a `Dataset` object
+ can also be initialized from an existing CuDF/Pandas DataFrame,
+ or from a Dask-DataFrame collection (e.g. `dask_cudf.DataFrame`).
+ For these in-memory formats, the size/number of partitions will
+ not be modified. That is, a CuDF/Pandas DataFrame (or PyArrow
+ Table) will produce a single-partition collection, while the
+ number/size of a Dask-DataFrame collection will be preserved::
+
+ # Initialize from CuDF DataFrame (creates 1 partition)
+ gdf = cudf.DataFrame(...)
+ dataset = Dataset(gdf)
+
+ # Initialize from Dask-CuDF DataFrame (preserves partitions)
+ ddf = dask_cudf.read_parquet(...)
+ dataset = Dataset(ddf)
+
+ Since the `Dataset` API can both ingest and output a Dask
+ collection, it is straightforward to transform data either before
+ or after an NVTabular workflow is executed. This means that some
+ complex pre-processing operations, that are not yet supported
+ in NVTabular, can still be accomplished with the Dask-CuDF API::
+
+ # Sort input data before final Dataset initialization
+ # Warning: Global sorting requires significant device memory!
+ ddf = Dataset("/path/to/data_pq", engine="parquet").to_ddf()
+ ddf = ddf.sort_values("user_rank", ignore_index=True)
+ dataset = Dataset(ddf)
Parameters
-----------
| [DOC] Dataset docstring needs example usage
| 2020-09-08T22:15:54 |
||
NVIDIA-Merlin/NVTabular | 369 | NVIDIA-Merlin__NVTabular-369 | [
"314"
] | d4773a09cbff59058b769d6195d61510a55836bf | diff --git a/nvtabular/io/avro.py b/nvtabular/io/avro.py
new file mode 100644
--- /dev/null
+++ b/nvtabular/io/avro.py
@@ -0,0 +1,175 @@
+#
+# Copyright (c) 2020, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+
+import cudf
+import uavro as ua
+from dask.base import tokenize
+from dask.dataframe.core import new_dd_object
+
+from .dataset_engine import DatasetEngine
+
+
+class AvroDatasetEngine(DatasetEngine):
+ """AvroDatasetEngine
+
+ Uses `uavro` to decompose dataset into groups of avro blocks.
+ Uses `cudf` to create new partitions.
+ """
+
+ def __init__(self, paths, part_size, storage_options=None, **kwargs):
+ super().__init__(paths, part_size, storage_options)
+ if kwargs != {}:
+ raise ValueError("Unexpected AvroDatasetEngine argument(s).")
+ self.blocksize = part_size
+
+ # Avro reader needs a list of files
+ # (Assume flat directory structure if this is a dir)
+ if len(self.paths) == 1 and self.fs.isdir(self.paths[0]):
+ self.paths = self.fs.glob(self.fs.sep.join([self.paths[0], "*"]))
+
+ def to_ddf(self, columns=None):
+
+ # Get list of pieces for each output
+ pieces, meta = self.process_metadata(columns=columns)
+
+ # TODO: Remove warning and avoid use of uavro in read_partition when
+ # cudf#6529 is fixed (https://github.com/rapidsai/cudf/issues/6529)
+ if len(pieces) > len(self.paths):
+ warnings.warn(
+ "Row-subset selection in cudf avro reader is currently broken. "
+ "Using uavro engine until cudf#6529 is addressed. "
+ "EXPECT POOR PERFORMANCE!! (compared to cuio-based reader)"
+ )
+
+ # Construct collection
+ token = tokenize(self.fs, self.paths, self.part_size, columns)
+ read_avro_name = "read-avro-partition-" + token
+ dsk = {
+ (read_avro_name, i): (AvroDatasetEngine.read_partition, self.fs, piece, columns)
+ for i, piece in enumerate(pieces)
+ }
+ return new_dd_object(dsk, read_avro_name, meta.iloc[:0], [None] * (len(pieces) + 1))
+
+ def process_metadata(self, columns=None):
+
+ with open(self.paths[0], "rb") as fo:
+ header = ua.core.read_header(fo)
+
+ # Use first block for metadata
+ num_rows = header["blocks"][0]["nrows"]
+ file_byte_count = header["blocks"][0]["size"]
+ meta = cudf.io.read_avro(self.paths[0], skiprows=0, num_rows=num_rows)
+
+ # Convert the desired in-memory GPU size to the expected
+ # on-disk storage size (blocksize)
+ df_byte_count = meta.memory_usage(deep=True).sum()
+ self.blocksize = int(float(file_byte_count) / df_byte_count * self.part_size)
+
+ # Break apart files at the "Avro block" granularity
+ pieces = []
+ for path in self.paths:
+ file_size = self.fs.du(path)
+ if file_size > self.blocksize:
+ part_count = 0
+ with open(path, "rb") as fo:
+ header = ua.core.read_header(fo)
+ ua.core.scan_blocks(fo, header, file_size)
+ blocks = header["blocks"]
+
+ file_row_offset, part_row_count = 0, 0
+ file_block_offset, part_block_count = 0, 0
+ file_byte_offset, part_byte_count = blocks[0]["offset"], 0
+
+ for i, block in enumerate(blocks):
+ part_row_count += block["nrows"]
+ part_block_count += 1
+ part_byte_count += block["size"]
+ if part_byte_count >= self.blocksize:
+ pieces.append(
+ {
+ "path": path,
+ "rows": (file_row_offset, part_row_count),
+ "blocks": (file_block_offset, part_block_count),
+ "bytes": (file_byte_offset, part_byte_count),
+ }
+ )
+ part_count += 1
+ file_row_offset += part_row_count
+ file_block_offset += part_block_count
+ file_byte_offset += part_byte_count
+ part_row_count = part_block_count = part_byte_count = 0
+
+ if part_block_count:
+ pieces.append(
+ {
+ "path": path,
+ "rows": (file_row_offset, part_row_count),
+ "blocks": (file_block_offset, part_block_count),
+ "bytes": (file_byte_offset, part_byte_count),
+ }
+ )
+ part_count += 1
+ if part_count == 1:
+ # No need to specify a byte range since we
+ # will need to read the entire file anyway.
+ pieces[-1] = {"path": pieces[-1]["path"]}
+ else:
+ pieces.append({"path": path})
+
+ return pieces, meta
+
+ @classmethod
+ def read_partition(cls, fs, piece, columns):
+
+ path = piece["path"]
+ if "rows" in piece:
+
+ # See: (https://github.com/rapidsai/cudf/issues/6529)
+ # Using `uavro` library for now. This means we must covert
+ # data to pandas, and then to cudf (which is much slower
+ # than `cudf.read_avro`). TODO: Once `num_rows` is fixed,
+ # this can be changed to:
+ #
+ # skiprows, num_rows = piece["rows"]
+ # df = cudf.io.read_avro(
+ # path, skiprows=skiprows, num_rows=num_rows
+ # )
+
+ block_offset, part_blocks = piece["blocks"]
+ file_size = fs.du(piece["path"])
+ with fs.open(piece["path"], "rb") as fo:
+ header = ua.core.read_header(fo)
+ ua.core.scan_blocks(fo, header, file_size)
+ header["blocks"] = header["blocks"][block_offset : block_offset + part_blocks]
+
+ # Adjust the total row count
+ nrows = 0
+ for block in header["blocks"]:
+ nrows += block["nrows"]
+ header["nrows"] = nrows
+
+ # Read in as pandas and convert to cudf (avoid block scan)
+ df = cudf.from_pandas(
+ ua.core.filelike_to_dataframe(fo, file_size, header, scan=False)
+ )
+ else:
+ df = cudf.io.read_avro(path)
+
+ # Deal with column selection
+ if columns is None:
+ columns = list(df.columns)
+ return df[columns]
diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -146,7 +146,7 @@ class Dataset:
directories are not yet supported).
engine : str or DatasetEngine
DatasetEngine object or string identifier of engine. Current
- string options include: ("parquet", "csv"). This argument
+ string options include: ("parquet", "csv", "avro"). This argument
is ignored if path_or_source is a DataFrame type.
part_size : str or int
Desired size (in bytes) of each Dask partition.
@@ -227,6 +227,17 @@ def __init__(
self.engine = CSVDatasetEngine(
paths, part_size, storage_options=storage_options, **kwargs
)
+ elif engine == "avro":
+ try:
+ from .avro import AvroDatasetEngine
+ except ImportError:
+ raise RuntimeError(
+ "Failed to import AvroDatasetEngine. Make sure uavro is installed."
+ )
+
+ self.engine = AvroDatasetEngine(
+ paths, part_size, storage_options=storage_options, **kwargs
+ )
else:
raise ValueError("Only parquet and csv supported (for now).")
else:
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -22,8 +22,10 @@
import dask
import dask_cudf
import numpy as np
+import pandas as pd
import pytest
from dask.dataframe import assert_eq
+from dask.dataframe.io.demo import names as name_list
import nvtabular as nvt
import nvtabular.io
@@ -296,3 +298,62 @@ def test_parquet_lists(tmpdir, freq_threshold):
df_out = cudf.read_parquet(out_paths)
assert df_out["Authors"].to_arrow().to_pylist() == [[1], [1, 4], [2, 3], [3]]
+
+
+@pytest.mark.parametrize("part_size", [None, "1KB"])
+@pytest.mark.parametrize("size", [100, 5000])
+@pytest.mark.parametrize("nfiles", [1, 2])
+def test_avro_basic(tmpdir, part_size, size, nfiles):
+
+ # Require uavro and fastavro library.
+ # Note that fastavro is only required to write
+ # avro files for testing, while uavro is actually
+ # used by AvroDatasetEngine.
+ fa = pytest.importorskip("fastavro")
+ pytest.importorskip("uavro")
+
+ # Define avro schema
+ schema = fa.parse_schema(
+ {
+ "name": "avro.example.User",
+ "type": "record",
+ "fields": [
+ {"name": "name", "type": "string"},
+ {"name": "age", "type": "int"},
+ ],
+ }
+ )
+
+ # Write avro dataset with two files.
+ # Collect block and record (row) count while writing.
+ nblocks = 0
+ nrecords = 0
+ paths = [os.path.join(str(tmpdir), f"test.{i}.avro") for i in range(nfiles)]
+ records = []
+ for path in paths:
+ names = np.random.choice(name_list, size)
+ ages = np.random.randint(18, 100, size)
+ data = [{"name": names[i], "age": ages[i]} for i in range(size)]
+ with open(path, "wb") as f:
+ fa.writer(f, schema, data)
+ with open(path, "rb") as fo:
+ avro_reader = fa.block_reader(fo)
+ for block in avro_reader:
+ nrecords += block.num_records
+ nblocks += 1
+ records += list(block)
+ if nfiles == 1:
+ paths = paths[0]
+
+ # Read back with dask.dataframe
+ df = nvt.Dataset(paths, part_size=part_size, engine="avro").to_ddf()
+
+ # Check basic length and partition count
+ if part_size == "1KB":
+ assert df.npartitions == nblocks
+ assert len(df) == nrecords
+
+ # Full comparison
+ expect = pd.DataFrame.from_records(records)
+ expect["age"] = expect["age"].astype("int32")
+ assert_eq(df.compute().reset_index(drop=True), expect)
| [FEA] Support reading datasets from Avro files
We should add support for reading from Avro datasets.
| There is no dask_cudf support for reading Avro files. I looked into implementing it using cudf, and to avoid double work I asked in the RAPIDS channel if there are plans to add support in the near future. Keith Kraus told me to create a FEA request in GitHub: https://github.com/rapidsai/cudf/issues/6496
If they add support, we can use it in a similar way to what we are doing with CSV. If they don't (or we don't want to wait), we can implement it in a similar way to what we are doing with Parquet.
Waiting to see what rapids does.
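Based on the patch in this record (which adds an `engine="avro"` branch to `Dataset`, backed by `AvroDatasetEngine` and requiring `uavro`), usage might look roughly like the sketch below; the path and `part_size` are illustrative assumptions, not values from this issue:
```python
import nvtabular as nvt

# hypothetical location; a flat directory of .avro files (or a list of paths) is expected
dataset = nvt.Dataset("/data/avro_dataset/", engine="avro", part_size="100MB")

# partitions are built from groups of Avro blocks and read into cudf
ddf = dataset.to_ddf()
print(ddf.head())
```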
| 2020-10-20T19:54:17 |
NVIDIA-Merlin/NVTabular | 388 | NVIDIA-Merlin__NVTabular-388 | [
"381"
] | bf9d1983a649cd8a3316455ca1decf412c7e2ee0 | diff --git a/nvtabular/io/writer.py b/nvtabular/io/writer.py
--- a/nvtabular/io/writer.py
+++ b/nvtabular/io/writer.py
@@ -124,7 +124,7 @@ def add_data(self, gdf):
# this restriction can be removed once cudf supports chunked writing
# in parquet
if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
- self._write_table(gdf, 0, True)
+ self._write_table(0, gdf, True)
return
# Generate `ind` array to map each row to an output file.
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -266,7 +266,9 @@ def test_mulifile_parquet(tmpdir, dataset, df, engine, num_io_threads, nfiles, s
@pytest.mark.parametrize("freq_threshold", [0, 1, 2])
-def test_parquet_lists(tmpdir, freq_threshold):
+@pytest.mark.parametrize("shuffle", [nvt.io.Shuffle.PER_PARTITION, None])
+@pytest.mark.parametrize("out_files_per_proc", [None, 2])
+def test_parquet_lists(tmpdir, freq_threshold, shuffle, out_files_per_proc):
df = cudf.DataFrame(
{
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
@@ -291,12 +293,13 @@ def test_parquet_lists(tmpdir, freq_threshold):
nvt.Dataset(filename),
output_format="parquet",
output_path=output_dir,
+ shuffle=shuffle,
+ out_files_per_proc=out_files_per_proc,
)
out_paths = glob.glob(os.path.join(output_dir, "*.parquet"))
- print(out_paths)
df_out = cudf.read_parquet(out_paths)
-
+ df_out = df_out.sort_values(by="Post", ascending=True)
assert df_out["Authors"].to_arrow().to_pylist() == [[1], [1, 4], [2, 3], [3]]
| [BUG] getting AttributeError: 'int' object has no attribute 'to_parquet' when using JoinExternal to merge dfs with list columns
**Describe the bug**
I am getting an `AttributeError: 'int' object has no attribute 'to_parquet'` error when using the JoinExternal op to merge DataFrames with list columns. Here is a toy example to reproduce the bug:
```
df = cudf.DataFrame({'doc_id': [1, 1, 2, 2, 3, 3, 4, 4], 'category_id': [1, 2, 3, 3, 5, 6, 6, 1], 'confidence_level': [0.92, 0.251, 0.352, 0.359, 0.978, 0.988, 0.978, 0.988]})
df_grouped = df.groupby('doc_id', as_index=False).agg({'category_id': ['collect'], 'confidence_level': ['collect']})
df_grouped.columns= df_grouped.columns.get_level_values(0)
df2 = cudf.DataFrame({'doc_id': [1, 2, 2, 3, 4, 3, 7, 8], 'category_id': [1, 2, 4, 3, 6, 6, 5, 2], 'ad_id': [1, 2, 3, 4, 4, 5, 10, 12],
'source_id': [1200, 1210, 1450, np.nan, 1330, 1200, 1500, 1350]})
columns_ext = ['doc_id', 'category_id', 'confidence_level']
kind_ext='cudf'
proc = nvt.Workflow(
cat_names= ['doc_id', 'category_id', 'ad_id', 'source_id'],
cont_names=[],
label_name=[])
proc.add_preprocess(JoinExternal(df_grouped, on= ['doc_id'], on_ext= ['doc_id'], kind_ext=kind_ext, columns_ext=columns_ext, cache='device', how='left'))
train_dataset = nvt.Dataset(df2)
proc.apply(train_dataset, apply_offline=True, record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1)
```
This results in the following error:
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-19-f93c44c3b381> in <module>
11 proc.add_preprocess(JoinExternal(df_grouped, on= ['doc_id'], on_ext= ['doc_id'], kind_ext=kind_ext, columns_ext=columns_ext, cache='device', how='left'))
12 train_dataset = nvt.Dataset(df2)
---> 13 proc.apply(train_dataset, apply_offline=True, record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1)
~/ronaya/NVTabular/nvtabular/workflow.py in apply(self, dataset, apply_offline, record_stats, shuffle, output_path, output_format, out_files_per_proc, num_io_threads)
738 output_format=output_format,
739 out_files_per_proc=out_files_per_proc,
--> 740 num_io_threads=num_io_threads,
741 )
742 else:
~/ronaya/NVTabular/nvtabular/workflow.py in build_and_process_graph(self, dataset, end_phase, output_path, record_stats, shuffle, output_format, out_files_per_proc, apply_ops, num_io_threads)
845 shuffle=shuffle,
846 out_files_per_proc=out_files_per_proc,
--> 847 num_threads=num_io_threads,
848 )
849
~/ronaya/NVTabular/nvtabular/workflow.py in ddf_to_dataset(self, output_path, shuffle, out_files_per_proc, output_format, num_threads)
931 output_format,
932 self.client,
--> 933 num_threads,
934 )
935 return
~/ronaya/NVTabular/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads)
110 out = client.compute(out).result()
111 else:
--> 112 out = dask.compute(out, scheduler="synchronous")[0]
113
114 # Follow-up Shuffling and _metadata creation
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/base.py in compute(*args, **kwargs)
450 postcomputes.append(x.__dask_postcompute__())
451
--> 452 results = schedule(dsk, keys, **kwargs)
453 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
454
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
525 """
526 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 527 return get_async(apply_sync, 1, dsk, keys, **kwargs)
528
529
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
492
493 while state["ready"] and len(state["running"]) < num_workers:
--> 494 fire_task()
495
496 succeeded = True
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in fire_task()
464 pack_exception,
465 ),
--> 466 callback=queue.put,
467 )
468
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in apply_sync(func, args, kwds, callback)
514 def apply_sync(func, args=(), kwds={}, callback=None):
515 """ A naive synchronous version of apply_async """
--> 516 res = func(*args, **kwds)
517 if callback is not None:
518 callback(res)
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
225 failed = False
226 except BaseException as e:
--> 227 result = pack_exception(e, dumps)
228 failed = True
229 return key, result, failed
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
220 try:
221 task, data = loads(task_info)
--> 222 result = _execute_task(task, data)
223 id = get_id()
224 result = dumps((result, id))
~/miniconda3/envs/1019/lib/python3.7/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/dask.py in _write_output_partition(gdf, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads)
61
62 # Add data
---> 63 writer.add_data(gdf)
64
65 return gdf_size
~/miniconda3/envs/1019/lib/python3.7/contextlib.py in inner(*args, **kwds)
72 def inner(*args, **kwds):
73 with self._recreate_cm():
---> 74 return func(*args, **kwds)
75 return inner
76
~/ronaya/NVTabular/nvtabular/io/writer.py in add_data(self, gdf)
125 # in parquet
126 if any(is_list_dtype(gdf[col].dtype) for col in gdf.columns):
--> 127 self._write_table(gdf, 0, True)
128 return
129
~/ronaya/NVTabular/nvtabular/io/parquet.py in _write_table(self, idx, data, has_list_column)
210 # write out a new file, rather than stream multiple chunks to a single file
211 filename = self._get_filename(len(self.data_paths))
--> 212 data.to_parquet(filename)
213 self.data_paths.append(filename)
214 else:
AttributeError: 'int' object has no attribute 'to_parquet'
```
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)]: `rapids 0.16 nightly conda env.`
- Method of NVTabular install: [conda, Docker, or from source]: `pip install -e.`
| @benfred I am getting the same error when using the HashBucket op with list columns. You can reproduce the bug with this example:
```
df = cudf.DataFrame({
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
"Engaging User": ["User_B", "User_B", "User_A", "User_D"],
"Post": [1, 2, 3, 4],
})
cat_names = ["Authors"]
cont_names = []
label_name = ["Post"]
processor = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=label_name)
processor.add_preprocess(HashBucket(num_buckets=10))
processor.finalize()
processor.apply(nvt.Dataset(df), record_stats=True, output_path='./output/', shuffle=True, out_files_per_proc=1) | 2020-10-30T17:32:45 |
NVIDIA-Merlin/NVTabular | 413 | NVIDIA-Merlin__NVTabular-413 | [
"412"
] | 002f8065d359d6a60f9686c78481d887321c94c6 | diff --git a/nvtabular/loader/tensorflow.py b/nvtabular/loader/tensorflow.py
--- a/nvtabular/loader/tensorflow.py
+++ b/nvtabular/loader/tensorflow.py
@@ -21,6 +21,7 @@
from nvtabular.io.dataset import Dataset
from nvtabular.loader.backend import DataLoader
from nvtabular.loader.tf_utils import configure_tensorflow, get_dataset_schema_from_feature_columns
+from nvtabular.ops import _get_embedding_order
from_dlpack = configure_tensorflow()
@@ -211,6 +212,11 @@ def __init__(
)
cat_names, cont_names = _validate_schema(feature_columns, cat_names, cont_names)
+ # sort the ccolumns to avoid getting incorrect output
+ # (https://github.com/NVIDIA/NVTabular/issues/412)
+ cat_names = _get_embedding_order(cat_names)
+ cont_names = _get_embedding_order(cont_names)
+
assert devices is None or len(devices) == 1 # TODO: figure out multi-gpu support
devices = devices or [0]
DataLoader.__init__(
| diff --git a/tests/unit/test_tf_dataloader.py b/tests/unit/test_tf_dataloader.py
--- a/tests/unit/test_tf_dataloader.py
+++ b/tests/unit/test_tf_dataloader.py
@@ -14,6 +14,8 @@
# limitations under the License.
#
+import os
+
import cudf
import numpy as np
import pytest
@@ -28,6 +30,31 @@
tf_dataloader = pytest.importorskip("nvtabular.loader.tensorflow")
+def test_tf_catname_ordering(tmpdir):
+ df = cudf.DataFrame(
+ {"cat1": [1] * 100, "cat2": [2] * 100, "cat3": [3] * 100, "label": [0] * 100}
+ )
+ path = os.path.join(tmpdir, "dataset.parquet")
+ df.to_parquet(path)
+ cat_names = ["cat3", "cat2", "cat1"]
+ cont_names = []
+ label_name = ["label"]
+
+ data_itr = tf_dataloader.KerasSequenceLoader(
+ [path],
+ cat_names=cat_names,
+ cont_names=cont_names,
+ batch_size=10,
+ label_names=label_name,
+ shuffle=False,
+ )
+
+ for X, y in data_itr:
+ assert list(X["cat1"].numpy()) == [1] * 10
+ assert list(X["cat2"].numpy()) == [2] * 10
+ assert list(X["cat3"].numpy()) == [3] * 10
+
+
# TODO: include use_columns option
# TODO: include parts_per_chunk test
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.06])
| [BUG] KerasSequenceLoader can produce wrong categorical output
**Describe the bug**
The KerasSequenceLoader class can produce incorrect values for categoricals. It expects that the cat_names passed in are already sorted, and if they aren't sorted the output tensors will be assigned incorrectly.
**Steps/Code to reproduce bug**
Passing categorical names in reverse order causes this test to fail:
```python
def test_tf_catname_ordering(tmpdir):
df = cudf.DataFrame({"cat1" : [1] * 100,
"cat2" : [2] * 100,
"cat3" : [3] * 100,
"label": [0] * 100 })
path = os.path.join(tmpdir, "dataset.parquet")
df.to_parquet(path)
cat_names = ["cat3", "cat2", "cat1"]
cont_names = []
label_name = ["label"]
data_itr = tf_dataloader.KerasSequenceLoader(
[path],
cat_names=cat_names,
cont_names=cont_names,
batch_size=10,
label_names=label_name,
shuffle=False,
)
for X, y in data_itr:
assert list(X["cat1"].numpy()) == [1] * 10
assert list(X["cat2"].numpy()) == [2] * 10
assert list(X["cat3"].numpy()) == [3] * 10
```
All of the cat1 values should be 1 here - but they are all 3's instead
| 2020-11-08T04:46:54 |
|
NVIDIA-Merlin/NVTabular | 451 | NVIDIA-Merlin__NVTabular-451 | [
"316"
] | 5e8f19477981b057bb188ae92795ecd98255474a | diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -378,6 +378,11 @@ def get_embedding_sizes(workflow):
# when only hashing is applied, this will return embedding shape as (num_buckets, emb_dim)
elif "buckets" in workflow.stats.keys():
buckets = workflow.stats["buckets"]
+
+ # if we have hash buckets, but no coategorical just use the buckets
+ if buckets and "categories" not in workflow.stats:
+ return {col: _emb_sz_rule(num_rows) for col, num_rows in buckets.items()}
+
if "mh" not in workflow.columns_ctx["categorical"]:
return _get_embeddings_dask(workflow.stats["categories"], cols, buckets, freq)
else:
diff --git a/nvtabular/ops/hash_bucket.py b/nvtabular/ops/hash_bucket.py
--- a/nvtabular/ops/hash_bucket.py
+++ b/nvtabular/ops/hash_bucket.py
@@ -17,12 +17,12 @@
from cudf.utils.dtypes import is_list_dtype
from nvtx import annotate
-from .categorify import _encode_list_column
+from .categorify import SetBuckets, _encode_list_column
from .operator import CAT
-from .transform_operator import TransformOperator
+from .transform_operator import DFOperator
-class HashBucket(TransformOperator):
+class HashBucket(DFOperator):
"""
This op maps categorical columns to a contiguous integer range
by first hashing the column then modulating by the number of
@@ -127,3 +127,12 @@ def op_logic(self, gdf: cudf.DataFrame, target_columns: list, stats_context=None
new_gdf[new_col] = encoded
return new_gdf
+
+ @property
+ def req_stats(self):
+ return [
+ SetBuckets(
+ columns=self.columns,
+ num_buckets=self.num_buckets,
+ )
+ ]
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -376,6 +376,9 @@ def test_hash_bucket_lists(tmpdir):
assert authors[0][0] == authors[1][0] # 'User_A'
assert authors[2][1] == authors[3][0] # 'User_C'
+ # make sure we get the embedding sizes
+ assert nvt.ops.get_embedding_sizes(processor)["Authors"][0] == 10
+
@pytest.mark.parametrize("engine", ["parquet"])
def test_fill_missing(tmpdir, df, dataset, engine):
| [FEA] Include bucket range as category stat on HashBucket Op
DL model training workflows that are agnostic to the preprocessing used to generate a particular categorical feature will need to be able to recover the vocabulary size in order to construct the embedding. For features generated using the `Categorify` op, this is not an issue, since that information is recorded in the `Workflow` stats, which can be exported after preprocessing and read back in at train time.
But that information isn't recorded for the `HashBucket` op, which means you need to write some sort of exception which can somehow check for how that feature was generated then access the `Workflow` which was used to generate it, which is overall just a terrible way to have to implement things.
However, if we record `range(num_buckets)` as a stat for the op and mimic the API used for recovering the `Categorify` categories, then we can use the stats as an input to the training script which can then recover all the necessary information in an agnostic fashion.
Related to #78, which will combine these functionalities. Any implementation there should include the indices we include for the hash bins as part of the category stats.
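For reference, the unit-test change in this PR points at the intended usage: after this change, `get_embedding_sizes` reports `num_buckets` as the cardinality of a hashed column. A rough sketch, reusing the `HashBucket` example quoted earlier in this document and the old (pre-ColumnGroup) Workflow API of that era; the bucket count and column names are illustrative:
```python
import cudf
import nvtabular as nvt
from nvtabular.ops import HashBucket

df = cudf.DataFrame({
    "Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
    "Post": [1, 2, 3, 4],
})

proc = nvt.Workflow(cat_names=["Authors"], cont_names=[], label_name=["Post"])
proc.add_preprocess(HashBucket(num_buckets=10))
proc.finalize()
proc.apply(nvt.Dataset(df), record_stats=True, output_path="./output/", shuffle=True, out_files_per_proc=1)

# the hashed column now exposes its bucket count as the embedding cardinality
assert nvt.ops.get_embedding_sizes(proc)["Authors"][0] == 10
```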
| Ah, you beat me to it :-) @benfred let's merge these tickets. I want to make sure that all forms of categorical encoding are covered, but Alec's description is way more detailed than mine. | 2020-11-23T17:59:58 |
NVIDIA-Merlin/NVTabular | 455 | NVIDIA-Merlin__NVTabular-455 | [
"424"
] | 661bf80ef8cca3fba3a6d455305915a54b01b5f4 | diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py
--- a/nvtabular/io/parquet.py
+++ b/nvtabular/io/parquet.py
@@ -53,8 +53,12 @@ def __init__(
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
- f"Row group size {rg_byte_size_0} is bigger than requested part_size "
- f"{self.part_size}"
+ f"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger"
+ f" than requested part_size ({self.part_size}) for the NVTabular dataset."
+ f"A row group memory size of 128 MB is generally recommended. You can find"
+ f" info on how to set the row group size of parquet files in "
+ f"https://nvidia.github.io/NVTabular/main/HowItWorks.html"
+ f"#getting-your-data-ready-for-nvtabular"
)
row_groups_per_part = 1.0
| [FEA] Warn on parquet row group sizes out of recommended bounds
**Is your feature request related to a problem? Please describe.**
We should warn on parquet files that contain row groups bigger than recommended - with actionable links and information for our customers
| To add some detail to this issue: NVT does actually raise a warning like the following one.
```
/nvtabular/nvtabular/io/parquet.py:75: UserWarning: Row group size 4017728134 is bigger than requested part_size 1000000000
f"Row group size {rg_byte_size_0} is bigger than requested part_size
```
But it gives no recommendation on how to properly define the row group sizes of the parquet files with respect to the configured NVT dataset part size (e.g. `nvt.Dataset(TRAIN_DIR, engine="parquet", part_size="1000MB")`). It would be nice to include code examples on how to transform a set of parquet files to the desired row group size using PyArrow, Pandas or cuDF (e.g. using Pandas: `df.to_parquet("filename.parquet", row_group_size=500, engine="pyarrow")`).
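As a concrete illustration of the kind of snippet the docs could include, here is a small PyArrow-based sketch; the file names and row-group size are placeholders:
```python
import pyarrow.parquet as pq

# rewrite an existing parquet file with smaller row groups, so that each
# row group fits comfortably inside the NVTabular part_size
table = pq.read_table("input.parquet")
pq.write_table(table, "output.parquet", row_group_size=500_000)  # rows per row group
```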
It would also be interesting to have in the documentation some recommendation on how to properly set the **NVT dataset part_size** based on how much GPU memory you have reserved for the Dask cluster (`LocalCUDACluster(device_memory_limit=?)`) and RMM (`rmm.reinitialize(pool_allocator=True, initial_pool_size=?)`).
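For orientation, the knobs being discussed fit together roughly as below; the values are pure placeholders, since the recommended numbers are exactly what this issue asks the docs to provide:
```python
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import rmm

# placeholder values: tune the device memory limit and pool size to your GPU
cluster = LocalCUDACluster(device_memory_limit="24GB")
client = Client(cluster)
client.run(rmm.reinitialize, pool_allocator=True, initial_pool_size=None)

# part_size should then be chosen so several partitions fit in the remaining memory,
# e.g. dataset = nvt.Dataset(TRAIN_DIR, engine="parquet", part_size="1000MB")
```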
The documentation side absolutely. We should also throw a better warning along the lines of:
'To achieve optimal performance the row_group_size of the parquet files should be in the x to y range. For more information about data prep visit: <docs page on data preparation>.'
And then we need the page for how to effectively prepare data for NVTabular. @gabrielspmoreira and @bschifferer can you please work on the docs page. | 2020-11-23T20:55:55 |
|
NVIDIA-Merlin/NVTabular | 525 | NVIDIA-Merlin__NVTabular-525 | [
"522"
] | 09f6afc05ab8872c96af8e5d91634b53ac1077b2 | diff --git a/nvtabular/column_group.py b/nvtabular/column_group.py
--- a/nvtabular/column_group.py
+++ b/nvtabular/column_group.py
@@ -35,16 +35,36 @@ class ColumnGroup:
"""
def __init__(self, columns):
- if isinstance(columns, str):
- self.columns = [columns]
- else:
- self.columns = [_convert_col(col) for col in columns]
self.parents = []
self.children = []
self.op = None
self.kind = None
self.dependencies = None
+ if isinstance(columns, str):
+ columns = [columns]
+
+ # if any of the values we're passed are a columngroup
+ # we have to ourselves as a childnode in the graph.
+ if any(isinstance(col, ColumnGroup) for col in columns):
+ self.columns = []
+ self.kind = "[...]"
+ for col in columns:
+ if not isinstance(col, ColumnGroup):
+ col = ColumnGroup(col)
+ else:
+ # we can't handle nesting arbitrarily deep here
+ # only accept non-nested (str) columns here
+ if any(not isinstance(c, str) for c in col.columns):
+ raise ValueError("Can't handle more than 1 level of nested columns")
+
+ col.children.append(self)
+ self.parents.append(col)
+ self.columns.append(tuple(col.columns))
+
+ else:
+ self.columns = [_convert_col(col) for col in columns]
+
def __rshift__(self, operator):
"""Transforms this ColumnGroup by applying an Operator
| diff --git a/tests/unit/test_column_group.py b/tests/unit/test_column_group.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/test_column_group.py
@@ -0,0 +1,43 @@
+import cudf
+import pytest
+
+from nvtabular import ColumnGroup, Dataset, Workflow
+from nvtabular.ops import Categorify, Rename
+
+
+def test_nested_column_group(tmpdir):
+ df = cudf.DataFrame(
+ {
+ "geo": ["US>CA", "US>NY", "CA>BC", "CA>ON"],
+ "user": ["User_A", "User_A", "User_A", "User_B"],
+ }
+ )
+
+ country = (
+ ColumnGroup(["geo"]) >> (lambda col: col.str.slice(0, 2)) >> Rename(postfix="_country")
+ )
+
+ # make sure we can do a 'combo' categorify (cross based) of country+user
+ # as well as categorifying the country and user columns on their own
+ cats = [country + "user"] + country + "user" >> Categorify(encode_type="combo")
+
+ workflow = Workflow(cats)
+ df_out = workflow.fit_transform(Dataset(df)).to_ddf().compute(scheduler="synchronous")
+
+ geo_country = df_out["geo_country"]
+ assert geo_country[0] == geo_country[1] # rows 0,1 are both 'US'
+ assert geo_country[2] == geo_country[3] # rows 2,3 are both 'CA'
+
+ user = df_out["user"]
+ assert user[0] == user[1] == user[2]
+ assert user[3] != user[2]
+
+ geo_country_user = df_out["geo_country_user"]
+ assert geo_country_user[0] == geo_country_user[1] # US / userA
+ assert geo_country_user[2] != geo_country_user[0] # same user but in canada
+
+ # make sure we get an exception if we nest too deeply (can't handle arbitrarily deep
+ # nested column groups - and the exceptions we would get in operators like Categorify
+ # are super confusing for users)
+ with pytest.raises(ValueError):
+ cats = [[country + "user"] + country + "user"] >> Categorify(encode_type="combo")
| [FEA] apply joint TE on multiple columns after another op
**Is your feature request related to a problem? Please describe.**
We'd like to run the following pipeline with LambdaOp and Target Encoding.
```
unixReviewTime = ColumnGroup(['unixReviewTime'])
unixReviewTime_toDatetime = unixReviewTime >> (lambda col: cudf.to_datetime(col, unit='s')) >> Rename(postfix="_toDatetime")
unixReviewTime_toDatetime_year = unixReviewTime_toDatetime >> (lambda col: col.dt.year) >> Rename(postfix="_year")
columns_1 = [[['userID'] + unixReviewTime_toDatetime_year]]
te_features = columns_1 >> nvt.ops.TargetEncoding("label", kfold=1, p_smooth=20)
```
However this does not work.
We also tried `columns_1 = ColumnGroup(['user_id']) + unixReviewTime_toDatetime_year` but this is not providing us with the joint multi-column TE as we want.
**Describe the solution you'd like**
We want to be able to do joint multi-column TE on `userID` (from the input dataset) and `unixReviewTime_toDatetime_year` that is calculated from a lambda op. So our final output column should be `TE_userID_unixReviewTime_toDatetime_year_label`.
**Additional context**
We are using the new API and a conda environment with the latest NVT main branch.
| 2021-01-11T23:45:38 |
|
NVIDIA-Merlin/NVTabular | 563 | NVIDIA-Merlin__NVTabular-563 | [
"524"
] | 9d5ed5ee5579b7b88f20e8dbdd1b89ade29ee522 | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -448,32 +448,22 @@ def to_parquet(
fs = get_fs_token_paths(output_path)[0]
fs.mkdirs(output_path, exist_ok=True)
- if shuffle or out_files_per_proc or cats or conts or labels:
-
- # Output dask_cudf DataFrame to dataset
- _ddf_to_dataset(
- ddf,
- fs,
- output_path,
- shuffle,
- out_files_per_proc,
- cats or [],
- conts or [],
- labels or [],
- "parquet",
- self.client,
- num_threads,
- self.cpu,
- )
- return
- # Default (shuffle=None and out_files_per_proc=None)
- # Just use `dask_cudf.to_parquet`
- fut = ddf.to_parquet(output_path, compression=None, write_index=False, compute=False)
- if self.client is None:
- fut.compute(scheduler="synchronous")
- else:
- fut.compute()
+ # Output dask_cudf DataFrame to dataset
+ _ddf_to_dataset(
+ ddf,
+ fs,
+ output_path,
+ shuffle,
+ out_files_per_proc,
+ cats or [],
+ conts or [],
+ labels or [],
+ "parquet",
+ self.client,
+ num_threads,
+ self.cpu,
+ )
def to_hugectr(
self,
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -88,7 +88,7 @@ def test_dask_workflow_api_dlrm(
output_path = os.path.join(tmpdir, "processed")
transformed = workflow.fit_transform(dataset)
- transformed.to_parquet(output_path=output_path, shuffle=shuffle)
+ transformed.to_parquet(output_path=output_path, shuffle=shuffle, out_files_per_proc=1)
result = transformed.to_ddf().compute()
assert len(df0) == len(result)
@@ -112,12 +112,12 @@ def test_dask_workflow_api_dlrm(
# Read back from disk
df_disk = dask_cudf.read_parquet(output_path, index=False).compute()
- # Can directly compare the final ddf to the result if we didn't shuffle
- if not shuffle:
- for col in df_disk:
- assert_eq(result[col], df_disk[col])
- else:
- assert len(df0) == len(df_disk)
+ # we don't have a deterministic ordering here, especially when using
+ # a dask client with multiple workers - so we need to sort the values here
+ columns = ["label", "x", "y", "id"] + cat_names
+ got = result.sort_values(columns).reset_index(drop=True)
+ expect = df_disk.sort_values(columns).reset_index(drop=True)
+ assert_eq(got, expect)
@pytest.mark.parametrize("part_mem_fraction", [0.01])
| [BUG] _file_list.txt is not written out in the validation output folder
**Describe the bug**
After we perform `workflow.transform()` for both the train and validation sets, we can see that `_file_list.txt` is generated in the training output folder, but it is missing from the validation output folder.
`_file_list.txt` is required for HugeCTR training. There should be one file under the train directory and another under the valid directory, and the file paths are provided in the JSON file:
https://github.com/NVIDIA/NVTabular/blob/main/examples/hugectr/dlrm_fp32_64k.json#L35
**Expected behavior**
`_file_list.txt` should be written out for validation also.
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)]: Conda
- Method of NVTabular install: [conda, Docker, or from source]: Conda + pip install
| This seems to be because of the shuffling behaviour - we default to using the dask-cudf 'to_parquet' functionality when we are not shuffling:https://github.com/NVIDIA/NVTabular/blob/09f6afc05ab8872c96af8e5d91634b53ac1077b2/nvtabular/io/dataset.py#L406-L408 - which doesn't write out the hugectr filelist.txt file
@rjzamora should we just always use the NVT to _ddf_to_dataset code to write out parquet datasets? Is there any advantage to the dask_cudf.to_parquet code?
> @rjzamora should we just always use the NVT to out_files_per_proc=None code to write out parquet datasets? Is there any advantage to the dask_cudf.to_parquet code?
It's probably fine to use `out_files_per_proc=None`. I don't recall if there are any real advantages to using `dask_cudf.to_parquet` - I'm not sure if it is still the case, but this code path was not originally the default, even with `shuffle=None`, because it required the user to explicitly specify `out_files_per_proc=None`.
@rnyak @rjzamora @benfred I get all the output files (file_list metadata, and parquet) with the updated Notebook: https://github.com/NVIDIA/NVTabular/blob/main/examples/hugectr/criteo-hugectr.ipynb
+1 I observed the same issue. Upon adding `shuffle=nvt.io.Shuffle.PER_PARTITION` I finally get the metadata files
```
proc.transform(train_dataset).to_parquet(output_path=output_train_dir, dtypes=dict_dtypes,
shuffle=nvt.io.Shuffle.PER_PARTITION,
cats=cat_feats.columns,
conts=cont_feats.columns,
labels=['target'])
``` | 2021-02-10T00:05:26 |
NVIDIA-Merlin/NVTabular | 568 | NVIDIA-Merlin__NVTabular-568 | [
"387"
] | 9218dd5b7d5244e860ddf45b637e3f482c157156 | diff --git a/nvtabular/ops/fill.py b/nvtabular/ops/fill.py
--- a/nvtabular/ops/fill.py
+++ b/nvtabular/ops/fill.py
@@ -36,18 +36,34 @@ class FillMissing(Operator):
-----------
fill_val : float, default 0
The constant value to replace missing values with.
+ add_binary_cols : boolean, default False
+ When True, adds binary columns that indicate whether cells in each column were filled
"""
- def __init__(self, fill_val=0):
+ def __init__(self, fill_val=0, add_binary_cols=False):
super().__init__()
self.fill_val = fill_val
+ self.add_binary_cols = add_binary_cols
@annotate("FillMissing_op", color="darkgreen", domain="nvt_python")
def transform(self, columns, gdf: cudf.DataFrame) -> cudf.DataFrame:
- return gdf[columns].fillna(self.fill_val)
+ if self.add_binary_cols:
+ for col in columns:
+ gdf[f"{col}_filled"] = gdf[col].isna()
+ gdf[col] = gdf[col].fillna(self.fill_val)
+ else:
+ gdf[columns] = gdf[columns].fillna(self.fill_val)
+
+ return gdf
transform.__doc__ = Operator.transform.__doc__
+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:
+ output_cols = columns[:]
+ if self.add_binary_cols:
+ output_cols.extend([f"{col}_filled" for col in columns])
+ return output_cols
+
class FillMedian(StatOperator):
"""
@@ -64,10 +80,16 @@ class FillMedian(StatOperator):
# Add FillMedian to the workflow for continuous columns
proc.add_cont_feature(nvt.ops.FillMedian())
+
+ Parameters
+ -----------
+ add_binary_cols : boolean, default False
+ When True, adds binary columns that indicate whether cells in each column were filled
"""
- def __init__(self):
+ def __init__(self, add_binary_cols=False):
super().__init__()
+ self.add_binary_cols = add_binary_cols
self.medians = {}
@annotate("FillMedian_transform", color="darkgreen", domain="nvt_python")
@@ -76,6 +98,8 @@ def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame
raise RuntimeError("need to call 'fit' before running transform")
for col in columns:
+ if self.add_binary_cols:
+ gdf[f"{col}_filled"] = gdf[col].isna()
gdf[col] = gdf[col].fillna(self.medians[col])
return gdf
@@ -96,3 +120,9 @@ def fit_finalize(self, dask_stats):
def clear(self):
self.medians = {}
+
+ def output_column_names(self, columns: ColumnNames) -> ColumnNames:
+ output_cols = columns[:]
+ if self.add_binary_cols:
+ output_cols.extend([f"{col}_filled" for col in columns])
+ return output_cols
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -134,8 +134,9 @@ def test_target_encode_multi(tmpdir, npartitions):
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("op_columns", [["x"], ["x", "y"]])
-def test_fill_median(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
- cont_features = op_columns >> nvt.ops.FillMedian()
+@pytest.mark.parametrize("add_binary_cols", [True, False])
+def test_fill_median(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns, add_binary_cols):
+ cont_features = op_columns >> nvt.ops.FillMedian(add_binary_cols=add_binary_cols)
processor = nvt.Workflow(cont_features)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
@@ -144,6 +145,9 @@ def test_fill_median(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
col_median = df[col].dropna().quantile(0.5, interpolation="linear")
assert math.isclose(col_median, processor.column_group.op.medians[col], rel_tol=1e1)
assert np.all((df[col].fillna(col_median) - new_gdf[col]).abs().values <= 1e-2)
+ assert (f"{col}_filled" in new_gdf.keys()) == add_binary_cols
+ if add_binary_cols:
+ assert df[col].isna().sum() == new_gdf[f"{col}_filled"].sum()
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@@ -207,9 +211,10 @@ def test_hash_bucket_lists(tmpdir):
@pytest.mark.parametrize("engine", ["parquet"])
-def test_fill_missing(tmpdir, df, dataset, engine):
+@pytest.mark.parametrize("add_binary_cols", [True, False])
+def test_fill_missing(tmpdir, df, dataset, engine, add_binary_cols):
cont_names = ["x", "y"]
- cont_features = cont_names >> nvt.ops.FillMissing(fill_val=42)
+ cont_features = cont_names >> nvt.ops.FillMissing(fill_val=42, add_binary_cols=add_binary_cols)
for col in cont_names:
idx = np.random.choice(df.shape[0] - 1, int(df.shape[0] * 0.2))
@@ -220,9 +225,13 @@ def test_fill_missing(tmpdir, df, dataset, engine):
processor = nvt.Workflow(cont_features)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
+
for col in cont_names:
assert np.all((df[col].fillna(42) - new_gdf[col]).abs().values <= 1e-2)
assert new_gdf[col].isna().sum() == 0
+ assert (f"{col}_filled" in new_gdf.keys()) == add_binary_cols
+ if add_binary_cols:
+ assert df[col].isna().sum() == new_gdf[f"{col}_filled"].sum()
@pytest.mark.parametrize("engine", ["parquet"])
| Fill Missing Op option to create a new binary column indicating the value was replaced.
Fill Missing Op should have the option to create a new binary column indicating whether the column was filled or not for continuous variables.
This is a common feature used when dealing with missing values of categoricals.
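Based on the patch and tests above, which add an `add_binary_cols` flag to `FillMissing` (and `FillMedian`), usage with the ColumnGroup API might look roughly like this; the column names and fill value are illustrative:
```python
import cudf
import nvtabular as nvt

df = cudf.DataFrame({"x": [1.0, None, 3.0], "y": [None, 2.0, 3.0]})
dataset = nvt.Dataset(df)

cont_features = ["x", "y"] >> nvt.ops.FillMissing(fill_val=0, add_binary_cols=True)
workflow = nvt.Workflow(cont_features)

gdf = workflow.fit_transform(dataset).to_ddf().compute()
# besides the imputed "x" and "y", the output also contains boolean
# "x_filled" and "y_filled" columns marking which rows were filled
print(gdf)
```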
| +1 | 2021-02-11T15:50:11 |
NVIDIA-Merlin/NVTabular | 600 | NVIDIA-Merlin__NVTabular-600 | [
"598"
] | 108bfc97ecb458256c21b46ea8402caa8f584bd9 | diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -151,7 +151,7 @@ def fit(self, dataset: Dataset):
# hack: store input/output dtypes here. We should have complete dtype
# information for each operator (like we do for column names), but as
# an interim solution this gets us what we need.
- input_dtypes = dataset.to_ddf().dtypes
+ input_dtypes = dataset.to_ddf()[self._input_columns()].dtypes
self.input_dtypes = dict(zip(input_dtypes.index, input_dtypes))
output_dtypes = self.transform(dataset).to_ddf().head(1).dtypes
self.output_dtypes = dict(zip(output_dtypes.index, output_dtypes))
| diff --git a/tests/unit/test_workflow.py b/tests/unit/test_workflow.py
--- a/tests/unit/test_workflow.py
+++ b/tests/unit/test_workflow.py
@@ -580,3 +580,14 @@ def test_workflow_move_saved(tmpdir):
# also check that when transforming our input we get the same results after loading
transformed = workflow2.transform(Dataset(data)).to_ddf().compute()
assert_eq(expected, transformed)
+
+
+def test_workflow_input_output_dtypes():
+ df = cudf.DataFrame({"genre": ["drama", "comedy"], "user": ["a", "b"], "unneeded": [1, 2]})
+ features = [["genre", "user"], "genre"] >> ops.Categorify(encode_type="combo")
+ workflow = Workflow(features)
+ workflow.fit(Dataset(df))
+
+ assert "unneeded" not in workflow.input_dtypes
+ assert set(workflow.input_dtypes.keys()) == {"genre", "user"}
+ assert set(workflow.output_dtypes.keys()) == {"genre_user", "genre"}
| [BUG] NVTabular workflow writes more input columns for triton inference server
**Describe the bug**
Using `generate_triton_model` generates the config file for Triton inference. It depends on `workflow.input_dtypes`.
If my workflow does not use all available columns, then `workflow.input_dtypes` contains extra, unused input columns and Triton will fail to load the config.
NVTabular workflow is:

Current (fails):

Error:
```
E0224 15:58:10.330248 178 model_repository_manager.cc:963] failed to load 'amazonreview_tf' version 1: Internal: unable to create stream: the provided PTX was compiled with an unsupported toolchain.
/nvtabular/nvtabular/workflow.py:236: UserWarning: Loading workflow generated with cudf version 0+untagged.1.gbd321d1 - but we are running cudf 0.18.0a+253.g53ed28e91c. This might cause issues
warnings.warn(
E0224 15:58:20.534884 178 model_repository_manager.cc:963] failed to load 'amazonreview_nvt' version 1: Internal: Traceback (most recent call last):
File "/opt/tritonserver/backends/python/startup.py", line 197, in Init
self.backend.initialize(args)
File "/models/models/amazonreview_nvt/1/model.py", line 57, in initialize
self.output_dtypes[name] = triton_string_to_numpy(conf["data_type"])
TypeError: 'NoneType' object is not subscriptable
I0224 15:58:20.535093 178 server.cc:490]
```
This error occurs when Triton tries to load the config for the column `brand`, which is not used in the workflow.
How to fix it:
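The patch at the top of this record implements the fix by limiting the recorded dtypes to the workflow's own input columns (`dataset.to_ddf()[self._input_columns()].dtypes`). A sketch of the resulting behaviour, mirroring the unit test added in this PR:
```python
import cudf
import nvtabular as nvt
from nvtabular import ops

df = cudf.DataFrame({"genre": ["drama", "comedy"], "user": ["a", "b"], "unneeded": [1, 2]})
features = [["genre", "user"], "genre"] >> ops.Categorify(encode_type="combo")

workflow = nvt.Workflow(features)
workflow.fit(nvt.Dataset(df))

# the unused column no longer leaks into the generated Triton config inputs
assert "unneeded" not in workflow.input_dtypes
```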

**Steps/Code to reproduce bug**
Use the MovieLens example and do not drop the timestamp column.
| 2021-02-24T23:54:47 |
|
NVIDIA-Merlin/NVTabular | 693 | NVIDIA-Merlin__NVTabular-693 | [
"691"
] | ce6c463a74b9c181667273fd2c652252ed92b77d | diff --git a/examples/horovod/tf_hvd_simple.py b/examples/horovod/tf_hvd_simple.py
--- a/examples/horovod/tf_hvd_simple.py
+++ b/examples/horovod/tf_hvd_simple.py
@@ -18,7 +18,7 @@
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument("--dir_in", default=None, help="Input directory")
-parser.add_argument("--b_size", default=None, help="batch size")
+parser.add_argument("--batch_size", default=None, help="batch size")
parser.add_argument("--cats", default=None, help="categorical columns")
parser.add_argument("--cats_mh", default=None, help="categorical multihot columns")
parser.add_argument("--conts", default=None, help="continuous columns")
@@ -27,7 +27,7 @@
BASE_DIR = args.dir_in or "./data/"
-BATCH_SIZE = args.b_size or 16384 # Batch Size
+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size
CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
NUMERIC_COLUMNS = args.conts or []
diff --git a/examples/horovod/torch-nvt-horovod.py b/examples/horovod/torch-nvt-horovod.py
--- a/examples/horovod/torch-nvt-horovod.py
+++ b/examples/horovod/torch-nvt-horovod.py
@@ -34,7 +34,7 @@
BASE_DIR = os.path.expanduser(args.dir_in or "./data/")
-BATCH_SIZE = args.batch_size or 16384 # Batch Size
+BATCH_SIZE = int(args.batch_size) or 16384 # Batch Size
CATEGORICAL_COLUMNS = args.cats or ["movieId", "userId"] # Single-hot
CATEGORICAL_MH_COLUMNS = args.cats_mh or ["genres"] # Multi-hot
NUMERIC_COLUMNS = args.conts or []
| diff --git a/tests/unit/test_tf_dataloader.py b/tests/unit/test_tf_dataloader.py
--- a/tests/unit/test_tf_dataloader.py
+++ b/tests/unit/test_tf_dataloader.py
@@ -301,7 +301,7 @@ def test_multigpu_partitioning(datasets, engine, batch_size, global_rank):
@pytest.mark.skipif(importlib.util.find_spec("horovod") is None, reason="needs horovod")
-def test_hvd(tmpdir):
+def test_horovod_multigpu(tmpdir):
json_sample = {
"conts": {},
"cats": {
@@ -359,6 +359,8 @@ def test_hvd(tmpdir):
hvd_exam_path,
"--dir_in",
f"{tmpdir}",
+ "--batch_size",
+ "1024",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
diff --git a/tests/unit/test_torch_dataloader.py b/tests/unit/test_torch_dataloader.py
--- a/tests/unit/test_torch_dataloader.py
+++ b/tests/unit/test_torch_dataloader.py
@@ -450,6 +450,8 @@ def test_horovod_multigpu(tmpdir):
hvd_example_path,
"--dir_in",
f"{tmpdir}",
+ "--batch_size",
+ "1024",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
| [BUG] Horovod example scripts fail when user supplies batch size parameter
**Describe the bug**
Using the batch size parameter on the TF Horovod example causes a type error with a mismatch between str and int.
**Steps/Code to reproduce bug**
Run the TF Horovod example with the arguments `--b_size 1024`.
**Expected behavior**
The script should accept a user-provided batch size.
**Environment details (please complete the following information):**
- Environment location: Bare-metal
- Method of NVTabular install: conda
**Additional context**
I believe [this line](https://github.com/NVIDIA/NVTabular/blob/main/examples/horovod/tf_hvd_simple.py#L30) and the same line in the Torch example just need type coercions from str to int.
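A minimal sketch of that coercion; the patch above casts with `int(args.batch_size)`, and an equivalent option is to let argparse do the conversion via `type=int`:
```python
import argparse

parser = argparse.ArgumentParser(description="Process some integers.")
# letting argparse coerce the flag avoids the str/int mismatch entirely
parser.add_argument("--batch_size", type=int, default=16384, help="batch size")
args = parser.parse_args()

BATCH_SIZE = args.batch_size  # already an int
```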
| 2021-04-02T16:42:38 |
|
NVIDIA-Merlin/NVTabular | 752 | NVIDIA-Merlin__NVTabular-752 | [
"684"
] | 389037f41597e4b2943da534449729e3ce15d0c5 | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -362,6 +362,10 @@ def to_ddf(self, columns=None, shuffle=False, seed=None):
def file_partition_map(self):
return self.engine._file_partition_map
+ @property
+ def partition_lens(self):
+ return self.engine._partition_lens
+
def to_cpu(self):
warnings.warn(
"Changing an NVTabular Dataset to CPU mode."
@@ -477,7 +481,7 @@ def shuffle_by_keys(self, keys, hive_data=None, npartitions=None):
# Fall back to dask.dataframe algorithm
return Dataset(ddf.shuffle(keys, npartitions=npartitions))
- def to_iter(self, columns=None, indices=None, shuffle=False, seed=None):
+ def to_iter(self, columns=None, indices=None, shuffle=False, seed=None, use_file_metadata=None):
"""Convert `Dataset` object to a `cudf.DataFrame` iterator.
Note that this method will use `to_ddf` to produce a
@@ -502,12 +506,39 @@ def to_iter(self, columns=None, indices=None, shuffle=False, seed=None):
The random seed to use if `shuffle=True`. If nothing
is specified, the current system time will be used by the
`random` std library.
+ use_file_metadata : bool; Optional
+ Whether to allow the returned ``DataFrameIter`` object to
+ use file metadata from the ``base_dataset`` to estimate
+ the row-count. By default, the file-metadata
+ optimization will only be used if the current Dataset is
+ backed by a file-based engine. Otherwise, it is possible
+ that an intermediate transform has modified the row-count.
"""
if isinstance(columns, str):
columns = [columns]
+ # Try to extract the row-size metadata
+ # if we are not shuffling
+ partition_lens_meta = None
+ if not shuffle and use_file_metadata is not False:
+ # We are allowed to use file metadata to calculate
+ # partition sizes. If `use_file_metadata` is None,
+ # we only use metadata if `self` is backed by a
+ # file-based engine (like "parquet"). Otherwise,
+ # we cannot be "sure" that the metadata row-count
+ # is correct.
+ try:
+ if use_file_metadata:
+ partition_lens_meta = self.base_dataset.partition_lens
+ else:
+ partition_lens_meta = self.partition_lens
+ except AttributeError:
+ pass
+
return DataFrameIter(
- self.to_ddf(columns=columns, shuffle=shuffle, seed=seed), indices=indices
+ self.to_ddf(columns=columns, shuffle=shuffle, seed=seed),
+ indices=indices,
+ partition_lens=partition_lens_meta,
)
def to_parquet(
@@ -869,12 +900,19 @@ def _set_dtypes(chunk, dtypes):
class DataFrameIter:
- def __init__(self, ddf, columns=None, indices=None):
+ def __init__(self, ddf, columns=None, indices=None, partition_lens=None):
self.indices = indices if isinstance(indices, list) else range(ddf.npartitions)
self._ddf = ddf
self.columns = columns
+ self.partition_lens = partition_lens
def __len__(self):
+ if self.partition_lens:
+ # Use metadata-based partition-size information
+ # if/when it is available. Note that this metadata
+ # will not be correct if rows where added or dropped
+ # after IO (within Ops).
+ return sum(self.partition_lens[i] for i in self.indices)
if len(self.indices) < self._ddf.npartitions:
return len(self._ddf.partitions[self.indices])
return len(self._ddf)
diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py
--- a/nvtabular/io/parquet.py
+++ b/nvtabular/io/parquet.py
@@ -67,6 +67,8 @@ def __init__(
cpu=False,
):
super().__init__(paths, part_size, cpu=cpu, storage_options=storage_options)
+ self._pp_map = None
+ self._pp_nrows = None
if row_groups_per_part is None:
path0 = self._dataset.pieces[0].path
with self.fs.open(path0, "rb") as f0:
@@ -111,8 +113,40 @@ def _dataset(self):
return dataset
@property
- @functools.lru_cache(1)
def _file_partition_map(self):
+ if self._pp_map is None:
+ self._process_parquet_metadata()
+ return self._pp_map
+
+ @property
+ def _partition_lens(self):
+ if self._pp_nrows is None:
+ self._process_parquet_metadata()
+ return self._pp_nrows
+
+ @property
+ def num_rows(self):
+ # TODO: Avoid parsing metadata once upstream dask
+ # can get the length efficiently (in all practical cases)
+ return sum(self._partition_lens)
+
+ def _process_parquet_metadata(self):
+ # Utility shared by `_file_partition_map` and `_partition_lens`
+ # to collect useful information from the parquet metadata
+
+ _pp_nrows = []
+
+ def _update_partition_lens(part_count, md, num_row_groups, rg_offset=None):
+ # Helper function to calculate the row count for each
+ # output partition (and add it to `_pp_nrows`)
+ rg_offset = rg_offset or 0
+ for rg_i in range(0, part_count, self.row_groups_per_part):
+ rg_f = min(rg_i + self.row_groups_per_part, num_row_groups)
+ _pp_nrows.append(
+ sum([md.row_group(rg + rg_offset).num_rows for rg in range(rg_i, rg_f)])
+ )
+ return
+
dataset = self._dataset
if dataset.metadata:
# We have a metadata file.
@@ -124,41 +158,29 @@ def _file_partition_map(self):
# Convert the per-file row-group count to the
# file-to-partition mapping
- ind = 0
+ ind, rg = 0, 0
_pp_map = defaultdict(list)
for fn, num_row_groups in _path_row_groups.items():
part_count = math.ceil(num_row_groups / self.row_groups_per_part)
_pp_map[fn] = np.arange(ind, ind + part_count)
+ _update_partition_lens(part_count, dataset.metadata, num_row_groups, rg_offset=rg)
ind += part_count
-
+ rg += num_row_groups
else:
# No metadata file. Construct file-to-partition map manually
ind = 0
_pp_map = {}
for piece in dataset.pieces:
- num_row_groups = piece.get_metadata().num_row_groups
+ md = piece.get_metadata()
+ num_row_groups = md.num_row_groups
part_count = math.ceil(num_row_groups / self.row_groups_per_part)
fn = piece.path.split(self.fs.sep)[-1]
_pp_map[fn] = np.arange(ind, ind + part_count)
+ _update_partition_lens(part_count, md, num_row_groups)
ind += part_count
- return _pp_map
-
- @property
- @functools.lru_cache(1)
- def num_rows(self):
- # TODO: Avoid parsing metadata here if we can confirm upstream dask
- # can get the length efficiently (in all practical cases)
- dataset = self._dataset
- if dataset.metadata:
- # We have a metadata file
- return dataset.metadata.num_rows
- else:
- # Sum up row-group sizes manually
- num_rows = 0
- for piece in dataset.pieces:
- num_rows += piece.get_metadata().num_rows
- return num_rows
+ self._pp_map = _pp_map
+ self._pp_nrows = _pp_nrows
def to_ddf(self, columns=None, cpu=None):
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -73,15 +73,17 @@ def test_dask_dataset_itr(tmpdir, datasets, engine, gpu_memory_frac):
else:
columns = mycols_csv
- dd = nvtabular.io.Dataset(
+ size = 0
+ ds = nvtabular.io.Dataset(
paths[0], engine=engine, part_mem_fraction=gpu_memory_frac, dtypes=dtypes
)
- size = 0
- for chunk in dd.to_iter(columns=columns):
+ my_iter = ds.to_iter(columns=columns)
+ for chunk in my_iter:
size += chunk.shape[0]
assert chunk["id"].dtype == np.int32
assert size == df1.shape[0]
+ assert len(my_iter) == size
@pytest.mark.parametrize("engine", ["csv", "parquet", "csv-no-header"])
@@ -603,6 +605,40 @@ def test_dataset_conversion(tmpdir, cpu, preserve_files):
assert not glob.glob(os.path.join(pq_path, "*.parquet"))
+@pytest.mark.parametrize("use_file_metadata", [True, None])
+@pytest.mark.parametrize("shuffle", [True, False])
+def test_parquet_iterator_len(tmpdir, shuffle, use_file_metadata):
+
+ ddf1 = dask.datasets.timeseries(
+ start="2000-01-01",
+ end="2000-01-6",
+ freq="600s",
+ partition_freq="1d",
+ id_lam=10,
+ seed=42,
+ ).shuffle("id")
+
+ # Write to parquet dataset
+ ddf1.to_parquet(str(tmpdir))
+
+ # Initialize Dataset
+ ds = nvt.Dataset(str(tmpdir), engine="parquet")
+
+ # Convert ds -> ds2
+ ds2 = nvt.Dataset(ds.to_ddf())
+
+ # Check that iterator lengths match the partition lengths
+ ddf2 = ds2.to_ddf(shuffle=shuffle, seed=42)
+ for i in range(ddf2.npartitions):
+ _iter = ds2.to_iter(
+ shuffle=shuffle,
+ seed=42,
+ indices=[i],
+ use_file_metadata=use_file_metadata,
+ )
+ assert len(ddf2.partitions[i]) == len(_iter)
+
+
@pytest.mark.parametrize("cpu", [True, False])
def test_hive_partitioned_data(tmpdir, cpu):
| [BUG] OOM error while trying to train on Outbrain dataset with Tensorflow
**Describe the bug**
A V100 16GB GPU now runs out of memory when trying to run the [Outbrain TF training notebook](https://github.com/NVIDIA/NVTabular/blob/main/examples/advanced-ops-outbrain/03-Training-with-TF.ipynb). This is a new problem that has cropped up in the past few days, so it may be related to recent TF dataloader changes.
**Steps/Code to reproduce bug**
Run the Outbrain example notebooks with a single V100 16GB GPU.
**Expected behavior**
Example should complete successfully without running out of memory
**Environment details (please complete the following information):**
TBD
**Additional context**
Stack trace:
```
--------------------------------------------------------------------------
MemoryError Traceback (most recent call last)
<ipython-input-18-b1725d7df4b2> in <module>
5 experimental_run_tf_function=False
6 )
----> 7 history = wide_and_deep_model.fit(train_dataset_tf, epochs=1)
/opt/conda/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1048 training_utils.RespectCompiledTrainableState(self):
1049 # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
-> 1050 data_handler = data_adapter.DataHandler(
1051 x=x,
1052 y=y,
/opt/conda/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution)
1098
1099 adapter_cls = select_data_adapter(x, y)
-> 1100 self._adapter = adapter_cls(
1101 x,
1102 y,
/opt/conda/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
896 "`keras.utils.Sequence` as input.")
897
--> 898 self._size = len(x)
899 self._shuffle_sequence = shuffle
900 self._keras_sequence = x
/nvtabular/nvtabular/loader/tensorflow.py in __len__(self)
237 # TODO: what's a better way to do this inheritance
238 # of the appropriate methods? A Metaclass?
--> 239 return DataLoader.__len__(self)
240
241 def __getitem__(self, idx):
/nvtabular/nvtabular/loader/backend.py in __len__(self)
203
204 def __len__(self):
--> 205 return _num_steps(len(self._buff), self.batch_size)
206
207 @property
/nvtabular/nvtabular/loader/backend.py in __len__(self)
61
62 def __len__(self):
---> 63 return len(self.itr)
64
65 @property
/nvtabular/nvtabular/io/dataset.py in __len__(self)
766
767 def __len__(self):
--> 768 return len(self._ddf.partitions[self.indices])
769
770 def __iter__(self):
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/dataframe/core.py in __len__(self)
3654 return super().__len__()
3655 else:
-> 3656 return len(s)
3657
3658 def __contains__(self, key):
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/dataframe/core.py in __len__(self)
555
556 def __len__(self):
--> 557 return self.reduction(
558 len, np.sum, token="len", meta=int, split_every=False
559 ).compute()
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/base.py in compute(self, **kwargs)
281 dask.base.compute
282 """
--> 283 (result,) = compute(self, traverse=False, **kwargs)
284 return result
285
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/base.py in compute(*args, **kwargs)
563 postcomputes.append(x.__dask_postcompute__())
564
--> 565 results = schedule(dsk, keys, **kwargs)
566 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
567
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, pool, **kwargs)
74 pools[thread][num_workers] = pool
75
---> 76 results = get_async(
77 pool.apply_async,
78 len(pool._pool),
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
485 _execute_task(task, data) # Re-execute locally
486 else:
--> 487 raise_exception(exc, tb)
488 res, worker_id = loads(res_info)
489 state["cache"][key] = res
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/local.py in reraise(exc, tb)
315 if exc.__traceback__ is not tb:
316 raise exc.with_traceback(tb)
--> 317 raise exc
318
319
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
220 try:
221 task, data = loads(task_info)
--> 222 result = _execute_task(task, data)
223 id = get_id()
224 result = dumps((result, id))
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in <genexpr>(.0)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
113 """
114 if isinstance(arg, list):
--> 115 return [_execute_task(a, cache) for a in arg]
116 elif istask(arg):
117 func, args = arg[0], arg[1:]
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in <listcomp>(.0)
113 """
114 if isinstance(arg, list):
--> 115 return [_execute_task(a, cache) for a in arg]
116 elif istask(arg):
117 func, args = arg[0], arg[1:]
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in <genexpr>(.0)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/dataframe/io/parquet/core.py in read_parquet_part(fs, func, meta, part, columns, index, kwargs)
381
382 if isinstance(part, list):
--> 383 dfs = [
384 func(fs, rg, columns.copy(), index, **toolz.merge(kwargs, kw))
385 for (rg, kw) in part
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask/dataframe/io/parquet/core.py in <listcomp>(.0)
382 if isinstance(part, list):
383 dfs = [
--> 384 func(fs, rg, columns.copy(), index, **toolz.merge(kwargs, kw))
385 for (rg, kw) in part
386 ]
/opt/conda/envs/rapids/lib/python3.8/site-packages/dask_cudf/io/parquet.py in read_partition(fs, piece, columns, index, categories, partitions, **kwargs)
59 strings_to_cats = kwargs.get("strings_to_categorical", False)
60 if cudf.utils.ioutils._is_local_filesystem(fs):
---> 61 df = cudf.read_parquet(
62 path,
63 engine="cudf",
/opt/conda/envs/rapids/lib/python3.8/site-packages/cudf/io/parquet.py in read_parquet(filepath_or_buffer, engine, columns, filters, row_groups, skiprows, num_rows, strings_to_categorical, use_pandas_metadata, *args, **kwargs)
249
250 if engine == "cudf":
--> 251 return libparquet.read_parquet(
252 filepaths_or_buffers,
253 columns=columns,
cudf/_lib/parquet.pyx in cudf._lib.parquet.read_parquet()
cudf/_lib/parquet.pyx in cudf._lib.parquet.read_parquet()
MemoryError: std::bad_alloc: CUDA error at: /opt/conda/envs/rapids/include/rmm/mr/device/cuda_memory_resource.hpp:69: cudaErrorMemoryAllocation out of memory
```
 | We think this is related to changes to the way that the `__len__` of a Dataset is computed, in order to account for the possibility that the dataset may be sharded across multiple processes. A first-pass partial fix might be to check whether the Dataset holds fewer partition indices than are available in the Dataset and, if not, use the earlier length-computation code.
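A very rough shape of that check (illustrative names only, not the actual NVTabular code):
```python
# fall back to the cheap, metadata-based count when this iterator sees every partition
def dataset_len(ddf, indices, metadata_num_rows):
    if len(indices) == ddf.npartitions:
        return metadata_num_rows                # e.g. row count from parquet metadata
    return len(ddf.partitions[indices])         # sharded case: may have to read data
```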
@rjzamora says:
> self._ddf.partitions[self.indices] will produce a dask DataFrame (a subset of the original ddf), and the len will probably alias to len(<ddf>.index). So, it will probably try to read from the files to produce an index for each partition. This can probably be optimized to use the metadata for a parquet file, but I don't think this is being done right now.
> we can probably provide our own optimization until it gets into Dask
> We want to use the metadata in the same way we do in [num_rows](https://github.com/NVIDIA/NVTabular/blob/d7a9373504f3e680d81bf899fb97bd1427c3a99f/nvtabular/io/parquet.py#L149), but we need to map the row-group metadata onto the expected ddf partitions. So, we will need to use the kind of tricky logic used in [_file_partition_map](https://github.com/NVIDIA/NVTabular/blob/d7a9373504f3e680d81bf899fb97bd1427c3a99f/nvtabular/io/parquet.py#L115)
> Note that the file_partition_map code is generating a map between files and partitions, but something similar could also map row-groups to partition lengths
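A minimal sketch of the metadata idea (this assumes each parquet file maps to exactly one ddf partition, which is the simple case; mapping row-groups onto partitions, as described above, is the harder part):
```python
import pyarrow.parquet as pq

def partition_lengths(paths):
    # sum row-group row counts from the parquet metadata instead of reading the data
    lengths = []
    for path in paths:
        md = pq.ParquetFile(path).metadata
        lengths.append(sum(md.row_group(i).num_rows for i in range(md.num_row_groups)))
    return lengths
```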
@karlhigley and @jperez999 I could train the notebook after pulling the recent changes you folks made and reducing the buffer size on a 16 GB V100.
Okay, cool! So the partial fix to avoid the issue in single-GPU mode seems to have worked; now we just need to make the changes @rjzamora suggested to handle it for large datasets in multi-GPU mode.
I have been scoping this out today, and there doesn't seem to be a "trivial" solution here. I have worked out a way to do this but there are two primary problems:
(1) It requires an API change to add `Dataset.shuffle` and to remove the `shuffle=` option from `Dataset.to_ddf`. The purpose of this is to make it easier to track partition reordering after a shuffle is performed. If the shuffle returns a Dataset object (rather than a ddf), it is easier to record the mapping between parquet row-group metadata and output ddf partitions.
(2) There is no guarantee that the metadata-informed partition sizes will be the same after pre-processing (i.e. general NVTabular Ops). So, we would still need to pull data into memory to calculate the length for an end-to-end pre-processing + training workflow.
I'm curious, why do we need to know the length of the dataset anyway? | 2021-04-21T03:26:21 |
NVIDIA-Merlin/NVTabular | 803 | NVIDIA-Merlin__NVTabular-803 | [
"734"
] | 4da100453730b36c09f9860821bf6e8818b4b763 | diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py
--- a/nvtabular/ops/__init__.py
+++ b/nvtabular/ops/__init__.py
@@ -30,6 +30,7 @@
from .join_external import JoinExternal
from .join_groupby import JoinGroupby
from .lambdaop import LambdaOp
+from .list_slice import ListSlice
from .logop import LogOp
from .normalize import Normalize, NormalizeMinMax
from .operator import Operator
diff --git a/nvtabular/ops/list_slice.py b/nvtabular/ops/list_slice.py
new file mode 100644
--- /dev/null
+++ b/nvtabular/ops/list_slice.py
@@ -0,0 +1,138 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import cudf
+import cupy as cp
+import numba.cuda
+import numpy as np
+import pandas as pd
+from cudf.core.column import as_column, build_column
+from nvtx import annotate
+
+from nvtabular.dispatch import DataFrameType
+
+from .operator import ColumnNames, Operator
+
+
+class ListSlice(Operator):
+ """Slices a list column
+
+ This operator provides the ability to slice list column by row. For example, to truncate a
+ list column to only include the first 10 elements per row::
+
+ truncated = column_names >> ops.ListSlice(10)
+
+ Take the first 10 items, ignoring the first element::
+
+ truncated = column_names >> ops.ListSlice(1, 11)
+
+ Take the last 10 items from each row::
+
+ truncated = column_names >> ops.ListSlice(-10)
+ """
+
+ def __init__(self, start, end=None):
+ super().__init__()
+ self.start = start
+ self.end = end
+
+ if self.start > 0 and self.end is None:
+ self.end = self.start
+ self.start = 0
+
+ if self.end is None:
+ self.end = np.iinfo(np.int64).max
+
+ @annotate("ListSlice_op", color="darkgreen", domain="nvt_python")
+ def transform(self, columns: ColumnNames, df: DataFrameType) -> DataFrameType:
+ on_cpu = isinstance(df, pd.DataFrame)
+ ret = pd.DataFrame() if on_cpu else cudf.DataFrame()
+ for col in columns:
+ # handle CPU via normal python slicing (not very efficient)
+ if on_cpu:
+ ret[col] = [row[self.start : self.end] for row in df[col]]
+ else:
+ # figure out the size of each row from the list offsets
+ c = df[col]._column
+ offsets = c.offsets.values
+ elements = c.elements.values
+
+ # figure out the size of each row after slicing start/end
+ new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
+ threads = 32
+ blocks = (offsets.size + threads - 1) // threads
+
+ # calculate new row offsets after slicing
+ _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)
+ new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
+
+ # create a new array for the sliced elements
+ new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)
+ if new_elements.size:
+ _slice_rows[blocks, threads](
+ self.start, offsets, elements, new_offsets, new_elements
+ )
+
+ # build up a list column with the sliced values
+ ret[col] = build_column(
+ None,
+ dtype=cudf.core.dtypes.ListDtype(new_elements.dtype),
+ size=new_offsets.size - 1,
+ children=(as_column(new_offsets), as_column(new_elements)),
+ )
+
+ return ret
+
+ transform.__doc__ = Operator.transform.__doc__
+
+
[email protected]
+def _calculate_row_sizes(start, end, offsets, row_sizes):
+ """given a slice (start/end) and existing offsets indicating row lengths, this
+ calculates the size for each new row after slicing"""
+ rowid = numba.cuda.grid(1)
+ if rowid < offsets.size - 1:
+ original_row_size = offsets[rowid + 1] - offsets[rowid]
+
+ # handle negative slicing appropiately
+ if start < 0:
+ start = original_row_size + start
+ if end < 0:
+ end = original_row_size + end
+
+ # clamp start/end to be in (0, original_row_size)
+ start = min(max(0, start), original_row_size)
+ end = min(max(0, end), original_row_size)
+
+ row_sizes[rowid + 1] = end - start
+
+
[email protected]
+def _slice_rows(start, offsets, elements, new_offsets, new_elements):
+ """slices rows of a list column. requires the 'new_offsets' to
+ be previously calculated (meaning that we don't need the 'end' slice index
+ since thats baked into the new_offsets"""
+ rowid = numba.cuda.grid(1)
+ if rowid < (new_offsets.size - 1):
+ if start >= 0:
+ offset = offsets[rowid] + start
+ else:
+ offset = offsets[rowid + 1] + start
+ if offset < offsets[rowid]:
+ offset = offsets[rowid]
+
+ for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):
+ new_elements[new_offset] = elements[offset]
+ offset += 1
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -967,7 +967,6 @@ def test_data_stats(tmpdir, df, datasets, engine):
@pytest.mark.parametrize("cpu", [False, True])
@pytest.mark.parametrize("keys", [["name"], "id", ["name", "id"]])
def test_groupby_op(keys, cpu):
-
# Initial timeseries dataset
size = 60
df1 = pd.DataFrame(
@@ -1017,3 +1016,42 @@ def test_groupby_op(keys, cpu):
# Check basic behavior or "y" column
assert (new_gdf["y-first"] < new_gdf["y-last"]).all()
+
+
[email protected]("cpu", [True, False])
+def test_list_slice(cpu):
+ DataFrame = pd.DataFrame if cpu else cudf.DataFrame
+
+ df = DataFrame({"y": [[0, 1, 2, 2, 767], [1, 2, 2, 3], [1, 223, 4]]})
+
+ op = ops.ListSlice(0, 2)
+ print("df", df)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[0, 1], [1, 2], [1, 223]]})
+ assert_eq(transformed, expected)
+
+ op = ops.ListSlice(3, 5)
+ print("df", df)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[2, 767], [3], []]})
+ assert_eq(transformed, expected)
+
+ op = ops.ListSlice(4, 10)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[767], [], []]})
+ assert_eq(transformed, expected)
+
+ op = ops.ListSlice(100, 20000)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[], [], []]})
+ assert_eq(transformed, expected)
+
+ op = ops.ListSlice(-4)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[1, 2, 2, 767], [1, 2, 2, 3], [1, 223, 4]]})
+ assert_eq(transformed, expected)
+
+ op = ops.ListSlice(-3, -1)
+ transformed = op.transform(["y"], df)
+ expected = DataFrame({"y": [[2, 2], [2, 2], [1, 223]]})
+ assert_eq(transformed, expected)
| [FEA] Truncate List columns (sparse tensors) - related to the GroupBy op
**Is your feature request related to a problem? Please describe.**
This feature is related to #641 "Sequential / Session-based recommendation and time series support - Group by sorting values by timestamp".
After grouping, some sequences (e.g. user sessions or time series) might be very long, and some ML models require sequences with a maximum (fixed) length. So list truncation is necessary.
As List columns are internally represented as sparse vectors, it is currently not possible to use a LambdaOp to truncate the list values to a maximum length.
**Describe the solution you'd like**
I would like either an option on the Groupby op to truncate all aggregated list columns to the same maximum length, or an independent TruncateList op that would truncate selected list columns.
**Describe alternatives you've considered**
As a workaround for this problem I am extending the NVT PyTorch dataloader, converting the internal representation of list columns to PyTorch sparse tensors (as shown in #500), converting them to dense tensors (padding zeros on the right), and then slicing the second dimension of the tensor to the maximum length.
But storing longer sequences than needed in the parquet files is a waste of space and requires workarounds like this on the model side.
| This is similar to string slicing with string columns - we really need to have list slicing for list columns.
We should prototype this with the offsets/values and follow up with the cudf team.
It should be possible to truncate either the start (positive number) or the end of the sessions (negative number).
A first-order solution is probably to use the [list.take](https://github.com/rapidsai/cudf/blob/d56428abfc7345e2f7be3b679f2383e0c1eb7084/python/cudf/cudf/core/column/lists.py#L317) method for cudf list columns. That is, you can just make a column of list indices, and call `take` to get what you want.
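A rough illustration of that idea (not the eventual op; the index column is built on the host here just to keep the sketch short, so it is not efficient):
```python
import cudf

df = cudf.DataFrame({"session": [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10]]})
max_len = 3

# per-row index lists [0 .. min(len, max_len)-1], then gather them with list.take
row_lens = df["session"].list.len().to_pandas()
indices = cudf.Series([list(range(min(int(n), max_len))) for n in row_lens])
truncated = df["session"].list.take(indices)
```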
It should be possible to get better performance with either a cudf primitive or with other custom solutions, but a quick/simple solution may be a good start. | 2021-05-11T18:12:10 |
NVIDIA-Merlin/NVTabular | 806 | NVIDIA-Merlin__NVTabular-806 | [
"753"
] | e02549a785dc3297b2065b44266226e0fe7af330 | diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py
--- a/nvtabular/io/parquet.py
+++ b/nvtabular/io/parquet.py
@@ -87,8 +87,8 @@ def __init__(
f" than requested part_size ({self.part_size}) for the NVTabular dataset."
f"A row group memory size of 128 MB is generally recommended. You can find"
f" info on how to set the row group size of parquet files in "
- f"https://nvidia.github.io/NVTabular/main/HowItWorks.html"
- f"#getting-your-data-ready-for-nvtabular"
+ f"https://nvidia.github.io/NVTabular/main/resources/troubleshooting.html"
+ f"#setting-the-row-group-size-for-the-parquet-files"
)
row_groups_per_part = 1.0
| [BUG] Website in Parquet row group size warning message doesn't exist
**Describe the bug**
When I create a `Dataset` object with `part_size=10MB` (for reasons), I get this warning message:
> nvtabular/io/parquet.py:83: UserWarning: Row group memory size (18299872) (bytes) of parquet file is bigger than requested part_size (10000000) for the NVTabular dataset.A row group memory size of 128 MB is generally recommended. You can find info on how to set the row group size of parquet files in https://nvidia.github.io/NVTabular/main/HowItWorks.html#getting-your-data-ready-for-nvtabular
The website mentioned at the end of the message 404s.
**Steps/Code to reproduce bug**
Try to visit [https://nvidia.github.io/NVTabular/main/HowItWorks.html#getting-your-data-ready-for-nvtabular](https://nvidia.github.io/NVTabular/main/HowItWorks.html#getting-your-data-ready-for-nvtabular)
**Expected behavior**
The warning message should contain a link to a page that exists.
| 2021-05-11T21:56:59 |
||
NVIDIA-Merlin/NVTabular | 820 | NVIDIA-Merlin__NVTabular-820 | [
"809"
] | 0f969a99c76c19d87beac463330a8ce44976a4a0 | diff --git a/nvtabular/column_group.py b/nvtabular/column_group.py
--- a/nvtabular/column_group.py
+++ b/nvtabular/column_group.py
@@ -138,11 +138,12 @@ def __add__(self, other):
__radd__ = __add__
def __sub__(self, other):
- """Adds columns from this ColumnGroup with another to return a new ColumnGroup
+ """Removes columns from this ColumnGroup with another to return a new ColumnGroup
Parameters
-----------
other: ColumnGroup or str or list of str
+ Columns to remove
Returns
-------
@@ -163,6 +164,28 @@ def __sub__(self, other):
child.kind = f"- {list(to_remove)}"
return child
+ def __getitem__(self, columns):
+ """Selects certain columns from this ColumnGroup, and returns a new Columngroup with only
+ those columns
+
+ Parameters
+ -----------
+ columns: str or list of str
+ Columns to select
+
+ Returns
+ -------
+ ColumnGroup
+ """
+ if isinstance(columns, str):
+ columns = [columns]
+
+ child = ColumnGroup(columns)
+ child.parents = [self]
+ self.children.append(child)
+ child.kind = str(columns)
+ return child
+
def __repr__(self):
output = " output" if not self.children else ""
return f"<ColumnGroup {self.label}{output}>"
| diff --git a/tests/unit/test_column_group.py b/tests/unit/test_column_group.py
--- a/tests/unit/test_column_group.py
+++ b/tests/unit/test_column_group.py
@@ -1,11 +1,31 @@
import cudf
import pytest
+from cudf.tests.utils import assert_eq
from nvtabular import ColumnGroup, Dataset, Workflow
from nvtabular.ops import Categorify, Rename
-def test_nested_column_group(tmpdir):
+def test_column_group_select():
+ df = cudf.DataFrame({"a": [1, 4, 9, 16, 25], "b": [0, 1, 2, 3, 4], "c": [25, 16, 9, 4, 1]})
+
+ input_features = ColumnGroup(["a", "b", "c"])
+ sqrt_features = input_features[["a", "c"]] >> cudf.sqrt
+ plus_one_features = input_features["b"] >> (lambda col: col + 1)
+ features = sqrt_features + plus_one_features
+
+ workflow = Workflow(features)
+ df_out = workflow.fit_transform(Dataset(df)).to_ddf().compute(scheduler="synchronous")
+
+ expected = cudf.DataFrame()
+ expected["a"] = cudf.sqrt(df["a"])
+ expected["c"] = cudf.sqrt(df["c"])
+ expected["b"] = df["b"] + 1
+
+ assert_eq(expected, df_out)
+
+
+def test_nested_column_group():
df = cudf.DataFrame(
{
"geo": ["US>CA", "US>NY", "CA>BC", "CA>ON"],
| [FEA] Select a subset of a ColumnGroup
**Is your feature request related to a problem? Please describe.**
No
**Describe the solution you'd like**
Currently, the best way I know of to select from a ColumnGroup generated by your code (say, as the result of a LambdaOp) is set-subtractive, i.e., if you have a ColumnGroup with generated features A, B, C and just want B and C, the way to get this is `group - ['A']`. A better way to do this in some cases would be set-selective, i.e., `group['B','C']`.
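A short sketch of the two styles (column names and the lambda are illustrative; the bracket selection is the new behaviour this request asks for):
```python
import nvtabular as nvt
from nvtabular import ops

features = nvt.ColumnGroup(["A", "B", "C"]) >> ops.LambdaOp(lambda col: col + 1)
only_bc_subtractive = features - ["A"]      # set-subtractive: drop what you don't want
only_bc_selective = features[["B", "C"]]    # set-selective: keep only what you want
```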
**Describe alternatives you've considered**
An alternative solution would be to add a select op.
**Additional context**
| 2021-05-18T17:10:35 |
|
NVIDIA-Merlin/NVTabular | 861 | NVIDIA-Merlin__NVTabular-861 | [
"860"
] | 40ed890ff28ef5fba6f68d7d82da0a3d92513518 | diff --git a/nvtabular/column_group.py b/nvtabular/column_group.py
--- a/nvtabular/column_group.py
+++ b/nvtabular/column_group.py
@@ -205,7 +205,7 @@ def input_column_names(self):
@property
def label(self):
if self.op:
- return str(self.op.__class__.__name__)
+ return self.op.label
elif self.kind:
return self.kind
elif not self.parents:
diff --git a/nvtabular/ops/lambdaop.py b/nvtabular/ops/lambdaop.py
--- a/nvtabular/ops/lambdaop.py
+++ b/nvtabular/ops/lambdaop.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from inspect import signature
+from inspect import getsourcelines, signature
from nvtx import annotate
@@ -55,7 +55,7 @@ def cond_prob(col, gdf):
Whether to provide a dependency column or not.
"""
- def __init__(self, f, dependency=None):
+ def __init__(self, f, dependency=None, label=None):
super().__init__()
if f is None:
raise ValueError("f cannot be None. LambdaOp op applies f to dataframe")
@@ -64,6 +64,7 @@ def __init__(self, f, dependency=None):
if self._param_count not in (1, 2):
raise ValueError("lambda function must accept either one or two parameters")
self.dependency = dependency
+ self._label = label
@annotate("DFLambda_op", color="darkgreen", domain="nvt_python")
def transform(self, columns: ColumnNames, df: DataFrameType) -> DataFrameType:
@@ -82,3 +83,23 @@ def transform(self, columns: ColumnNames, df: DataFrameType) -> DataFrameType:
def dependencies(self):
return self.dependency
+
+ @property
+ def label(self):
+ # if we're given an explicit label to use, return it
+ if self._label:
+ return self._label
+
+ # if we have a named function (not a lambda) return the function name
+ name = self.f.__name__
+ if name != "<lambda>":
+ return name
+ else:
+ # otherwise get the lambda source code from the inspect module if possible
+ source = getsourcelines(self.f)[0][0]
+ lambdas = [op.strip() for op in source.split(">>") if "lambda " in op]
+ if len(lambdas) == 1 and lambdas[0].count("lambda") == 1:
+ return lambdas[0]
+
+ # Failed to figure out the source
+ return "LambdaOp"
diff --git a/nvtabular/ops/operator.py b/nvtabular/ops/operator.py
--- a/nvtabular/ops/operator.py
+++ b/nvtabular/ops/operator.py
@@ -79,3 +79,7 @@ def __rrshift__(self, other) -> ColumnGroup:
import nvtabular
return nvtabular.ColumnGroup(other) >> self
+
+ @property
+ def label(self) -> str:
+ return self.__class__.__name__
| [FEA] Add a way to name LambdaOps
**Describe the solution you'd like**
It'd be nice to have a way to name LambdaOps so that they show up with that name in the workflow graph instead of just the non-descriptive moniker 'LambdaOp'
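A hedged usage sketch based on the `label=` argument added by this PR (column name and lambda are illustrative):
```python
from nvtabular import ops

features = ["price"] >> ops.LambdaOp(lambda col: col + 1, label="price_plus_one")
# the workflow graph should now show "price_plus_one" instead of "LambdaOp"
```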
| 2021-06-04T22:20:34 |
||
NVIDIA-Merlin/NVTabular | 905 | NVIDIA-Merlin__NVTabular-905 | [
"732"
] | 2e772626e69740b19207456a3922dbbbc00720ca | diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -433,10 +433,33 @@ def _get_embedding_order(cat_names):
return cat_names
-def get_embedding_sizes(workflow):
- """Returns a dictionary of best embedding sizes from the workflow"""
+def get_embedding_sizes(source, output_dtypes=None):
+ """Returns a dictionary of embedding sizes from a workflow or column_group
+
+ Parameters
+ ----------
+ source : Workflow or ColumnGroup
+ Either a nvtabular Workflow or ColumnGroup object that we should use to find
+ embedding sizes
+ output_dtypes : dict, optional
+ Optional dictionary of column_name:dtype. If passing a workflow object dtypes
+ will be read from the workflow. This is used to figure out which columns
+ are multihot-categorical, which are split out by this function. If passed a column_group
+ and this parameter isn't set, you won't have multihot columns returned separately
+ """
# TODO: do we need to distinguish multihot columns here? (if so why? )
- queue = [workflow.column_group]
+
+ # have to lazy import Workflow to avoid circular import errors
+ from nvtabular.workflow import Workflow
+
+ if isinstance(source, Workflow):
+ queue = [source.column_group]
+ output_dtypes = output_dtypes or source.output_dtypes
+ else:
+ # passed in a column group
+ queue = [source]
+ output_dtypes = output_dtypes or {}
+
output = {}
multihot_columns = set()
while queue:
@@ -444,12 +467,13 @@ def get_embedding_sizes(workflow):
if current.op and hasattr(current.op, "get_embedding_sizes"):
output.update(current.op.get_embedding_sizes(current.columns))
elif not current.op:
-
# only follow parents if its not an operator node (which could
# transform meaning of the get_embedding_sizes
queue.extend(current.parents)
+
for column in output:
- if _is_list_dtype(workflow.output_dtypes[column]):
+ dtype = output_dtypes.get(column)
+ if dtype and _is_list_dtype(dtype):
# multi hot so remove from output and add to multihot
multihot_columns.add(column)
# TODO: returning differnt return types like this (based off the presence
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -717,11 +717,16 @@ def test_categorify_max_size(max_emb_size):
# check encoded values after freq_hashing with fix emb size
assert new_gdf["Author"].max() <= max_emb_size["Author"]
assert new_gdf["Engaging_User"].max() <= max_emb_size["Engaging_User"]
+
# check embedding size is less than max_size after hashing with fix emb size.
- assert nvt.ops.get_embedding_sizes(processor)["Author"][0] <= max_emb_size["Author"]
- assert (
- nvt.ops.get_embedding_sizes(processor)["Engaging_User"][0] <= max_emb_size["Engaging_User"]
- )
+ embedding_sizes = nvt.ops.get_embedding_sizes(processor)
+ assert embedding_sizes["Author"][0] <= max_emb_size["Author"]
+ assert embedding_sizes["Engaging_User"][0] <= max_emb_size["Engaging_User"]
+
+ # make sure we can also get embedding sizes from the column_group
+ embedding_sizes = nvt.ops.get_embedding_sizes(cat_features)
+ assert embedding_sizes["Author"][0] <= max_emb_size["Author"]
+ assert embedding_sizes["Engaging_User"][0] <= max_emb_size["Engaging_User"]
@pytest.mark.parametrize("cpu", [True, False])
 | [FEA] Return embedding table shape by feeding a column group to get_embedding_sizes() function
**Is your feature request related to a problem? Please describe.**
In the current case, to get the embedding table shape, we need to do as follows:
```
cat_features = ['col'] >> Categorify()
workflow = nvt.Workflow(cat_features)
dataset = nvt.Dataset(gdf)
workflow.fit(dataset)
EMBEDDING_TABLE_SHAPES = nvt.ops.get_embedding_sizes(workflow)
```
but if we have many other features from other ops and don't want to add `cat_features` to the final workflow, we cannot get the embedding table shape from `nvt.ops.get_embedding_sizes(workflow)` because it would return {}.
To avoid that, can we change `def get_embedding_sizes(workflow):` to `def get_embedding_sizes(ColumnGroups):`,
or something like that, so that we can run `nvt.ops.get_embedding_sizes(cat_features)` instead?
https://github.com/NVIDIA/NVTabular/blob/c7b7e05564ea675f00cad6e784cc360f8bb4ee85/nvtabular/ops/categorify.py#L449
| @gabrielspmoreira for viz.
This is possible now, but the api is a little clunky:
```python
cat_features.op.get_embedding_sizes(cat_features.columns)
```
> This is possible now, but the api is a little clunky:
>
> ```python
> cat_features.op.get_embedding_sizes(cat_features.columns)
> ```
Thanks Ben, will try it out.
Thanks @benfred . I have tried your suggested command and it does work.
`categ_features.op.get_embedding_sizes(categ_features.columns)`
The user could be interested to know only the cardinality of the categorical features and not our recommended embedding sizes. So maybe having a `get_categ_cardinalities()` could be interesting.
Furthermore, that function could be global to the workflow, making it clear that it is only available after the fit() call and returning a dict with the cardinality of all categorical features. | 2021-06-25T18:04:53 |
NVIDIA-Merlin/NVTabular | 935 | NVIDIA-Merlin__NVTabular-935 | [
"763"
] | 0c80385521455ed06a3acd450f254fd356f66d88 | diff --git a/nvtabular/dispatch.py b/nvtabular/dispatch.py
--- a/nvtabular/dispatch.py
+++ b/nvtabular/dispatch.py
@@ -105,6 +105,10 @@ def _is_cpu_object(x):
return isinstance(x, (pd.DataFrame, pd.Series))
+def is_series_or_dataframe_object(maybe_series_or_df):
+ return _is_series_object(maybe_series_or_df) or _is_dataframe_object(maybe_series_or_df)
+
+
def _hex_to_int(s, dtype=None):
def _pd_convert_hex(x):
if pd.isnull(x):
@@ -320,11 +324,28 @@ def _make_df(_like_df=None, device=None):
return pd.DataFrame(_like_df)
elif isinstance(_like_df, (cudf.DataFrame, cudf.Series)):
return cudf.DataFrame(_like_df)
+ elif isinstance(_like_df, dict) and len(_like_df) > 0:
+ is_pandas = all(isinstance(v, pd.Series) for v in _like_df.values())
+
+ return pd.DataFrame(_like_df) if is_pandas else cudf.DataFrame(_like_df)
if device == "cpu":
return pd.DataFrame()
return cudf.DataFrame()
+def _add_to_series(series, to_add, prepend=True):
+ if isinstance(series, pd.Series):
+ series_to_add = pd.Series(to_add)
+ elif isinstance(series, cudf.Series):
+ series_to_add = cudf.Series(to_add)
+ else:
+ raise ValueError("Unrecognized series, please provide either a pandas a cudf series")
+
+ series_to_concat = [series_to_add, series] if prepend else [series, series_to_add]
+
+ return _concat(series_to_concat)
+
+
def _detect_format(data):
"""Utility to detect the format of `data`"""
from nvtabular import Dataset
diff --git a/nvtabular/framework_utils/tensorflow/feature_column_utils.py b/nvtabular/framework_utils/tensorflow/feature_column_utils.py
--- a/nvtabular/framework_utils/tensorflow/feature_column_utils.py
+++ b/nvtabular/framework_utils/tensorflow/feature_column_utils.py
@@ -13,10 +13,9 @@
# limitations under the License.
#
-import os
import warnings
-import cudf
+import pandas as pd
import tensorflow as tf
from tensorflow.python.feature_column import feature_column_v2 as fc
@@ -227,7 +226,7 @@ def _get_parents(column):
features += features_replaced_buckets
if len(categorifies) > 0:
- features += categorifies.keys() >> Categorify()
+ features += categorifies.keys() >> Categorify(vocabs=pd.DataFrame(categorifies))
if len(hashes) > 0:
features += hashes.keys() >> HashBucket(hashes)
@@ -282,22 +281,4 @@ def _get_parents(column):
workflow = nvt.Workflow(features)
- # create stats for Categorify op if we need it
- if len(categorifies) > 0:
- if category_dir is None:
- category_dir = "/tmp/categories" # nosec
- if not os.path.exists(category_dir):
- os.makedirs(category_dir)
-
- stats = {"categories": {}}
- for feature_name, categories in categorifies.items():
- categories.insert(0, None)
- df = cudf.DataFrame({feature_name: categories})
-
- save_path = os.path.join(category_dir, f"unique.{feature_name}.parquet")
- df.to_parquet(save_path)
- stats["categories"][feature_name] = save_path
-
- workflow.stats = stats
-
return workflow, numeric_columns + new_feature_columns
diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -15,6 +15,7 @@
import os
import warnings
+from copy import deepcopy
from dataclasses import dataclass
from operator import getitem
from typing import Optional, Union
@@ -32,19 +33,8 @@
from fsspec.core import get_fs_token_paths
from pyarrow import parquet as pq
-from nvtabular.dispatch import (
- DataFrameType,
- _arange,
- _encode_list_column,
- _flatten_list_column,
- _from_host,
- _hash_series,
- _is_list_dtype,
- _parquet_writer_dispatch,
- _read_parquet_dispatch,
- _series_has_nulls,
- annotate,
-)
+from nvtabular import dispatch
+from nvtabular.dispatch import DataFrameType, annotate
from nvtabular.worker import fetch_table_data, get_worker_cache
from .operator import ColumnNames, Operator
@@ -198,6 +188,7 @@ def __init__(
name_sep="_",
search_sorted=False,
num_buckets=None,
+ vocabs=None,
max_size=0,
):
@@ -237,8 +228,10 @@ def __init__(
# Only support two kinds of multi-column encoding
if encode_type not in ("joint", "combo"):
raise ValueError(f"encode_type={encode_type} not supported.")
+ if encode_type == "combo" and vocabs is not None:
+ raise ValueError("Passing in vocabs is not supported with a combo encoding.")
- # Other self-explanatory intialization
+ # Other self-explanatory initialization
super().__init__()
self.freq_threshold = freq_threshold or 0
self.out_path = out_path or "./"
@@ -249,7 +242,6 @@ def __init__(
self.cat_cache = cat_cache
self.encode_type = encode_type
self.search_sorted = search_sorted
- self.categories = {}
if self.search_sorted and self.freq_threshold:
raise ValueError(
@@ -284,6 +276,11 @@ def __init__(
"with this num_buckets setting!"
)
+ self.vocabs = {}
+ if vocabs is not None:
+ self.vocabs = self.process_vocabs(vocabs)
+ self.categories = deepcopy(self.vocabs)
+
@annotate("Categorify_fit", color="darkgreen", domain="nvt_python")
def fit(self, columns: ColumnNames, ddf: dd.DataFrame):
# User passed in a list of column groups. We need to figure out
@@ -319,23 +316,11 @@ def fit(self, columns: ColumnNames, ddf: dd.DataFrame):
warnings.warn("Cannot use `search_sorted=True` for pandas-backed data.")
# convert tuples to lists
- columns = [list(c) if isinstance(c, tuple) else c for c in columns]
- dsk, key = _category_stats(
- ddf,
- FitOptions(
- columns,
- [],
- [],
- self.out_path,
- self.freq_threshold,
- self.tree_width,
- self.on_host,
- concat_groups=self.encode_type == "joint",
- name_sep=self.name_sep,
- max_size=self.max_size,
- num_buckets=self.num_buckets,
- ),
- )
+ cols_with_vocabs = list(self.categories.keys())
+ columns = [
+ list(c) if isinstance(c, tuple) else c for c in columns if c not in cols_with_vocabs
+ ]
+ dsk, key = _category_stats(ddf, self._create_fit_options_from_columns(columns))
return Delayed(key, dsk)
def fit_finalize(self, categories):
@@ -343,7 +328,50 @@ def fit_finalize(self, categories):
self.categories[col] = categories[col]
def clear(self):
- self.categories = {}
+ self.categories = deepcopy(self.vocabs)
+
+ def process_vocabs(self, vocabs):
+ categories = {}
+
+ if dispatch._is_dataframe_object(vocabs):
+ fit_options = self._create_fit_options_from_columns(list(vocabs.columns))
+ base_path = os.path.join(self.out_path, fit_options.stat_name)
+ os.makedirs(base_path, exist_ok=True)
+ for col in list(vocabs.columns):
+ col_df = vocabs[[col]]
+ if col_df[col].iloc[0] is not None:
+ with_empty = dispatch._add_to_series(col_df[col], [None]).reset_index()[0]
+ vals = {col: with_empty}
+ col_df = dispatch._make_df(vals)
+
+ save_path = os.path.join(base_path, f"unique.{col}.parquet")
+ col_df.to_parquet(save_path)
+ categories[col] = save_path
+ elif isinstance(vocabs, dict) and all(isinstance(v, str) for v in vocabs.values()):
+ categories = vocabs
+ else:
+ error = """Unrecognized vocab type,
+ please provide either a dictionary with paths to a parquet files
+ or a DataFrame that contains the vocabulary per column.
+ """
+ raise ValueError(error)
+
+ return categories
+
+ def _create_fit_options_from_columns(self, columns) -> "FitOptions":
+ return FitOptions(
+ columns,
+ [],
+ [],
+ self.out_path,
+ self.freq_threshold,
+ self.tree_width,
+ self.on_host,
+ concat_groups=self.encode_type == "joint",
+ name_sep=self.name_sep,
+ max_size=self.max_size,
+ num_buckets=self.num_buckets,
+ )
def set_storage_path(self, new_path, copy=False):
self.categories = _copy_storage(self.categories, self.out_path, new_path, copy=copy)
@@ -486,7 +514,7 @@ def get_embedding_sizes(source, output_dtypes=None):
for column in output:
dtype = output_dtypes.get(column)
- if dtype and _is_list_dtype(dtype):
+ if dtype and dispatch._is_list_dtype(dtype):
# multi hot so remove from output and add to multihot
multihot_columns.add(column)
# TODO: returning differnt return types like this (based off the presence
@@ -630,7 +658,7 @@ def _top_level_groupby(df, options: FitOptions):
# (flattening provides better cudf/pd support)
if _is_list_col(cat_col_group, df_gb):
# handle list columns by encoding the list values
- df_gb = _flatten_list_column(df_gb[cat_col_group[0]])
+ df_gb = dispatch._flatten_list_column(df_gb[cat_col_group[0]])
# NOTE: groupby(..., dropna=False) requires pandas>=1.1.0
gb = df_gb.groupby(cat_col_group, dropna=False).agg(agg_dict)
@@ -671,7 +699,7 @@ def _mid_level_groupby(dfs, col_group, freq_limit_val, options: FitOptions):
# Construct gpu DataFrame from pyarrow data.
# `on_host=True` implies gpu-backed data.
df = pa.concat_tables(dfs, promote=True)
- df = _from_host(df)
+ df = dispatch._from_host(df)
else:
df = _concat(dfs, ignore_index=True)
groups = df.groupby(col_group, dropna=False)
@@ -753,7 +781,7 @@ def _write_gb_stats(dfs, base_path, col_group, options: FitOptions):
if not options.on_host and len(dfs):
# Want first non-empty df for schema (if there are any)
_d = next((df for df in dfs if len(df)), dfs[0])
- pwriter = _parquet_writer_dispatch(_d, path=path, compression=None)
+ pwriter = dispatch._parquet_writer_dispatch(_d, path=path, compression=None)
# Loop over dfs and append to file
# TODO: For high-cardinality columns, should support
@@ -794,7 +822,7 @@ def _write_uniques(dfs, base_path, col_group, options):
# Construct gpu DataFrame from pyarrow data.
# `on_host=True` implies gpu-backed data.
df = pa.concat_tables(dfs, promote=True)
- df = _from_host(df)
+ df = dispatch._from_host(df)
else:
df = _concat(dfs, ignore_index=True)
rel_path = "unique.%s.parquet" % (_make_name(*col_group, sep=options.name_sep))
@@ -823,9 +851,11 @@ def _write_uniques(dfs, base_path, col_group, options):
if nlargest < len(df):
df = df.nlargest(n=nlargest, columns=name_count)
- if not _series_has_nulls(df[col]):
+
+ if not dispatch._series_has_nulls(df[col]):
if name_count in df:
df = df.sort_values(name_count, ascending=False, ignore_index=True)
+
nulls_missing = True
new_cols[col] = _concat(
[df._constructor_sliced([None], dtype=df[col].dtype), df[col]],
@@ -989,7 +1019,7 @@ def _encode(
selection_r = name if isinstance(name, list) else [storage_name]
list_col = _is_list_col(selection_l, df)
if path:
- read_pq_func = _read_parquet_dispatch(df)
+ read_pq_func = dispatch._read_parquet_dispatch(df)
if cat_cache is not None:
cat_cache = (
cat_cache if isinstance(cat_cache, str) else cat_cache.get(storage_name, "disk")
@@ -1021,10 +1051,10 @@ def _encode(
if not search_sorted:
if list_col:
- codes = _flatten_list_column(df[selection_l[0]])
- codes["order"] = _arange(len(codes), like_df=df)
+ codes = dispatch._flatten_list_column(df[selection_l[0]])
+ codes["order"] = dispatch._arange(len(codes), like_df=df)
else:
- codes = type(df)({"order": _arange(len(df), like_df=df)}, index=df.index)
+ codes = type(df)({"order": dispatch._arange(len(df), like_df=df)}, index=df.index)
for c in selection_l:
codes[c] = df[c].copy()
if buckets and storage_name in buckets:
@@ -1064,7 +1094,7 @@ def _encode(
labels[labels >= len(value[selection_r])] = na_sentinel
if list_col:
- labels = _encode_list_column(df[selection_l[0]], labels, dtype=dtype)
+ labels = dispatch._encode_list_column(df[selection_l[0]], labels, dtype=dtype)
elif dtype:
labels = labels.astype(dtype, copy=False)
@@ -1097,7 +1127,7 @@ def _get_multicolumn_names(column_groups, df_columns, name_sep):
def _is_list_col(column_group, df):
- has_lists = any(_is_list_dtype(df[col]) for col in column_group)
+ has_lists = any(dispatch._is_list_dtype(df[col]) for col in column_group)
if has_lists and len(column_group) != 1:
raise ValueError("Can't categorical encode multiple list columns")
return has_lists
@@ -1106,7 +1136,7 @@ def _is_list_col(column_group, df):
def _hash_bucket(df, num_buckets, col, encode_type="joint"):
if encode_type == "joint":
nb = num_buckets[col[0]]
- encoded = _hash_series(df[col[0]]) % nb
+ encoded = dispatch._hash_series(df[col[0]]) % nb
elif encode_type == "combo":
if len(col) > 1:
name = _make_name(*tuple(col), sep="_")
@@ -1115,7 +1145,7 @@ def _hash_bucket(df, num_buckets, col, encode_type="joint"):
nb = num_buckets[name]
val = 0
for column in col:
- val ^= _hash_series(df[column]) # or however we want to do this aggregation
+ val ^= dispatch._hash_series(df[column]) # or however we want to do this aggregation
val = val % nb
encoded = val
return encoded
| diff --git a/tests/unit/test_ops.py b/tests/unit/test_ops.py
--- a/tests/unit/test_ops.py
+++ b/tests/unit/test_ops.py
@@ -107,7 +107,6 @@ def test_target_encode(tmpdir, cat_groups, kfold, fold_seed, cpu):
@pytest.mark.parametrize("npartitions", [1, 2])
@pytest.mark.parametrize("cpu", [True, False])
def test_target_encode_multi(tmpdir, npartitions, cpu):
-
cat_1 = np.asarray(["baaaa"] * 12)
cat_2 = np.asarray(["baaaa"] * 6 + ["bbaaa"] * 3 + ["bcaaa"] * 3)
num_1 = np.asarray([1, 1, 2, 2, 2, 1, 1, 5, 4, 4, 4, 4])
@@ -445,7 +444,8 @@ def test_lambdaop_misalign(cpu):
@pytest.mark.parametrize("freq_threshold", [0, 1, 2])
@pytest.mark.parametrize("cpu", [False, True])
@pytest.mark.parametrize("dtype", [None, np.int32, np.int64])
-def test_categorify_lists(tmpdir, freq_threshold, cpu, dtype):
[email protected]("vocabs", [None, pd.DataFrame({"Authors": [f"User_{x}" for x in "ACBE"]})])
+def test_categorify_lists(tmpdir, freq_threshold, cpu, dtype, vocabs):
df = cudf.DataFrame(
{
"Authors": [["User_A"], ["User_A", "User_E"], ["User_B", "User_C"], ["User_C"]],
@@ -457,7 +457,7 @@ def test_categorify_lists(tmpdir, freq_threshold, cpu, dtype):
label_name = ["Post"]
cat_features = cat_names >> ops.Categorify(
- out_path=str(tmpdir), freq_threshold=freq_threshold, dtype=dtype
+ out_path=str(tmpdir), freq_threshold=freq_threshold, dtype=dtype, vocabs=vocabs
)
workflow = nvt.Workflow(cat_features + label_name)
@@ -471,8 +471,7 @@ def test_categorify_lists(tmpdir, freq_threshold, cpu, dtype):
assert df_out["Authors"].dtype == cudf.core.dtypes.ListDtype(dtype if dtype else "int64")
compare = df_out["Authors"].to_arrow().to_pylist()
- # change values based on frequency "C" (2) comes before "B" (1)
- if freq_threshold < 2:
+ if freq_threshold < 2 or vocabs is not None:
assert compare == [[1], [1, 4], [3, 2], [2]]
else:
assert compare == [[1], [1, 0], [0, 2], [2]]
@@ -771,7 +770,6 @@ def test_joingroupby_dependency(tmpdir, cpu):
@pytest.mark.parametrize("cpu", [True, False])
@pytest.mark.parametrize("groups", [[["Author", "Engaging-User"]], "Author"])
def test_joingroupby_multi(tmpdir, groups, cpu):
-
df = pd.DataFrame(
{
"Author": ["User_A", "User_A", "User_A", "User_B"],
@@ -824,7 +822,6 @@ def test_joingroupby_multi(tmpdir, groups, cpu):
@pytest.mark.parametrize("cpu", [True, False])
@pytest.mark.parametrize("drop_duplicates", [True, False])
def test_join_external(tmpdir, df, dataset, engine, kind_ext, cache, how, cpu, drop_duplicates):
-
# Define "external" table
shift = 100
df_ext = df[["id"]].copy().sort_values("id")
diff --git a/tests/unit/test_tf_feature_columns.py b/tests/unit/test_tf_feature_columns.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/test_tf_feature_columns.py
@@ -0,0 +1,24 @@
+import pytest
+
+tf = pytest.importorskip("tensorflow")
+nvtf = pytest.importorskip("nvtabular.framework_utils.tensorflow")
+
+
+def test_feature_column_utils():
+ cols = [
+ tf.feature_column.embedding_column(
+ tf.feature_column.categorical_column_with_vocabulary_list(
+ "vocab_1", ["a", "b", "c", "d"]
+ ),
+ 16,
+ ),
+ tf.feature_column.embedding_column(
+ tf.feature_column.categorical_column_with_vocabulary_list(
+ "vocab_2", ["1", "2", "3", "4"]
+ ),
+ 32,
+ ),
+ ]
+
+ workflow, _ = nvtf.make_feature_column_workflow(cols, "target")
+ assert workflow.column_group.columns == ["target", "vocab_1", "vocab_2"]
| [BUG] make_feature_column_workflow doesn't work with categorical columns
Since the API overhaul in 0.4 - the make_feature_column_workflow function doesn't work with categorical data.
The problem is that its trying to directly set the category statistics on the workflow https://github.com/NVIDIA/NVTabular/blob/8b834310680154c102237b14cd36c165aa5c4cff/nvtabular/framework_utils/tensorflow/feature_column_utils.py#L292-L301
We should update to using a fit categorify op - and add a test to catch this in the future
| 2021-07-12T12:36:29 |
|
NVIDIA-Merlin/NVTabular | 981 | NVIDIA-Merlin__NVTabular-981 | [
"739"
] | b76a19b8b6517ed18886ec99587546d7221a018b | diff --git a/nvtabular/inference/triton/__init__.py b/nvtabular/inference/triton/__init__.py
--- a/nvtabular/inference/triton/__init__.py
+++ b/nvtabular/inference/triton/__init__.py
@@ -18,6 +18,8 @@
import os
from shutil import copyfile
+import pandas as pd
+
# this needs to be before any modules that import protobuf
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@@ -350,6 +352,8 @@ def convert_df_to_triton_input(column_names, batch, input_class=grpcclient.Infer
inputs = []
for i, (name, col) in enumerate(columns):
if _is_list_dtype(col):
+ if isinstance(col, pd.Series):
+ raise ValueError("this function doesn't support CPU list values yet")
inputs.append(
_convert_column_to_triton_input(
col._column.offsets.values_host.astype("int64"), name + "__nnzs", input_class
@@ -361,7 +365,8 @@ def convert_df_to_triton_input(column_names, batch, input_class=grpcclient.Infer
)
)
else:
- inputs.append(_convert_column_to_triton_input(col.values_host, name, input_class))
+ values = col.values if isinstance(col, pd.Series) else col.values_host
+ inputs.append(_convert_column_to_triton_input(values, name, input_class))
return inputs
diff --git a/nvtabular/inference/triton/model.py b/nvtabular/inference/triton/model.py
--- a/nvtabular/inference/triton/model.py
+++ b/nvtabular/inference/triton/model.py
@@ -24,6 +24,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import functools
import json
import logging
import os
@@ -62,6 +63,18 @@ def _initialize_ops(self, column_group, visited=None):
if inference_op:
column_group.op = inference_op
+ supported = column_group.op.supports
+
+ # if we're running on the CPU only, mask off support for GPU data formats
+ if self.kind == "CPU":
+ supported = functools.reduce(
+ lambda a, b: a | b,
+ (v for v in list(Supports) if v & supported and "CPU" in str(v)),
+ )
+ # the 'supports' property is readonly, and we can't always attach a new property
+ # to some of the operators (C++ categorify etc). set on the column_group instead
+ column_group.inference_supports = supported
+
for parent in column_group.parents:
if parent not in visited:
visited.add(parent)
@@ -72,6 +85,7 @@ def initialize(self, args):
args["model_repository"], str(args["model_version"]), "workflow"
)
self.workflow = nvtabular.Workflow.load(workflow_path)
+ self.kind = args["model_instance_kind"]
self.model_config = json.loads(args["model_config"])
self.output_model = self.model_config["parameters"]["output_model"]["string_value"]
@@ -229,7 +243,7 @@ def _transform_tensors(input_tensors, column_group):
# we have multiple different kinds of data here (dataframe/array on cpu/gpu)
# we need to convert to a common format here first before concatentating.
op = column_group.op
- target_kind = op.supports if op else Supports.CPU_DICT_ARRAY
+ target_kind = column_group.inference_supports if op else Supports.CPU_DICT_ARRAY
# note : the 2nd convert_format call needs to be stricter in what the kind is
# (exact match rather than a bitmask of values)
tensors, kind = convert_format(tensors, kind, target_kind)
@@ -244,8 +258,8 @@ def _transform_tensors(input_tensors, column_group):
if column_group.op:
try:
# if the op doesn't support the current kind - we need to convert
- if not column_group.op.supports & kind:
- tensors, kind = convert_format(tensors, kind, column_group.op.supports)
+ if not column_group.inference_supports & kind:
+ tensors, kind = convert_format(tensors, kind, column_group.inference_supports)
tensors = column_group.op.transform(column_group.columns, tensors)
diff --git a/nvtabular/workflow.py b/nvtabular/workflow.py
--- a/nvtabular/workflow.py
+++ b/nvtabular/workflow.py
@@ -258,9 +258,14 @@ def check_version(stored, current, name):
lib = cudf if cudf else pd
versions = meta["versions"]
check_version(versions["nvtabular"], nvt_version, "nvtabular")
- check_version(versions[lib.__name__], lib.__version__, lib.__name__)
check_version(versions["python"], sys.version, "python")
+ if lib.__name__ in versions:
+ check_version(versions[lib.__name__], lib.__version__, lib.__name__)
+ else:
+ expected = "GPU" if "cudf" in versions else "CPU"
+ warnings.warn(f"Loading workflow generated on {expected}")
+
# load up the workflow object di
workflow = cloudpickle.load(open(os.path.join(path, "workflow.pkl"), "rb"))
workflow.client = client
| diff --git a/tests/unit/test_triton_inference.py b/tests/unit/test_triton_inference.py
--- a/tests/unit/test_triton_inference.py
+++ b/tests/unit/test_triton_inference.py
@@ -11,7 +11,7 @@
import nvtabular as nvt
import nvtabular.ops as ops
-from nvtabular.dispatch import HAS_GPU, _make_df
+from nvtabular.dispatch import HAS_GPU, _hash_series, _make_df
from nvtabular.ops.operator import Supports
from tests.conftest import assert_eq
@@ -141,7 +141,7 @@ def test_concatenate_dataframe(tmpdir):
}
)
# this bug only happened with a dataframe representation: force this by using a lambda
- cats = ["cat"] >> ops.LambdaOp(lambda col: col.hash_values() % 1000)
+ cats = ["cat"] >> ops.LambdaOp(lambda col: _hash_series(col) % 1000)
conts = ["cont"] >> ops.Normalize() >> ops.FillMissing() >> ops.LogOp()
workflow = nvt.Workflow(cats + conts)
_verify_workflow_on_tritonserver(tmpdir, workflow, df, "test_concatenate_dataframe")
| [FEA] Triton inference support should be able to select running on cpu/gpu
| 2021-07-21T23:22:42 |
|
NVIDIA-Merlin/NVTabular | 1,010 | NVIDIA-Merlin__NVTabular-1010 | [
"996"
] | 886d5b85fee83acfefc3f60c282f723f41719d53 | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -642,9 +642,9 @@ def to_parquet(
persist stage. The `FULL` option is not yet implemented.
partition_on : str or list(str)
Columns to use for hive-partitioning. If this option is used,
- `preserve_files`, `output_files`, and `out_files_per_proc` will
- all be ignored. Also, the `PER_WORKER` shuffle will not be
- supported.
+ `preserve_files`, `output_files`, and `out_files_per_proc`
+ cannot be specified, and `method` will be ignored. Also, the
+ `PER_WORKER` shuffle will not be supported.
preserve_files : bool
Whether to preserve the original file-to-partition mapping of
the base dataset. This option requires `method="subgraph"`, and is
@@ -695,25 +695,41 @@ def to_parquet(
specifying `method="worker"`.
"""
- # Check that method (algorithm) is valid
- if method not in ("subgraph", "worker"):
- raise ValueError(f"{method} not a recognized method for `Dataset.to_parquet`")
-
- # Deal with method-specific defaults
- if method == "worker":
- if output_files or preserve_files:
- raise ValueError("output_files and preserve_files require `method='subgraph'`")
- output_files = False
- elif preserve_files and output_files:
- raise ValueError("Cannot specify both preserve_files and output_files.")
- elif not (output_files or preserve_files):
- # Default "subgraph" behavior - Set output_files to the
- # total umber of workers, multiplied by out_files_per_proc
- try:
- nworkers = len(self.client.cluster.workers)
- except AttributeError:
- nworkers = 1
- output_files = nworkers * (out_files_per_proc or 1)
+ if partition_on:
+
+ # Check that the user is not expecting a specific output-file
+ # count/structure that is not supported
+ if output_files:
+ raise ValueError("`output_files` not supported when `partition_on` is used.")
+ if out_files_per_proc:
+ raise ValueError("`out_files_per_proc` not supported when `partition_on` is used.")
+ if preserve_files:
+ raise ValueError("`preserve_files` not supported when `partition_on` is used.")
+
+ else:
+
+ # Check that method (algorithm) is valid
+ if method not in ("subgraph", "worker"):
+ raise ValueError(f"{method} not a recognized method for `Dataset.to_parquet`")
+
+ # Deal with method-specific defaults
+ if method == "worker":
+ if output_files or preserve_files:
+ raise ValueError("output_files and preserve_files require `method='subgraph'`")
+ output_files = False
+ elif preserve_files and output_files:
+ raise ValueError("Cannot specify both preserve_files and output_files.")
+ elif not (output_files or preserve_files):
+ # Default "subgraph" behavior - Set output_files to the
+ # total umber of workers, multiplied by out_files_per_proc
+ try:
+ nworkers = len(self.client.cluster.workers)
+ except AttributeError:
+ nworkers = 1
+ output_files = nworkers * (out_files_per_proc or 1)
+
+ # Replace None/False suffix argument with ""
+ suffix = suffix or ""
# Check shuffle argument
shuffle = _check_shuffle_arg(shuffle)
@@ -725,9 +741,6 @@ def to_parquet(
else:
ddf = self.to_ddf(shuffle=shuffle)
- # Replace None/False suffix argument with ""
- suffix = suffix or ""
-
# Deal with `method=="subgraph"`.
# Convert `output_files` argument to a dict mapping
if output_files:
diff --git a/nvtabular/io/writer.py b/nvtabular/io/writer.py
--- a/nvtabular/io/writer.py
+++ b/nvtabular/io/writer.py
@@ -129,6 +129,11 @@ def _write_thread(self):
@annotate("add_data", color="orange", domain="nvt_python")
def add_data(self, df):
+
+ # Early return
+ if isinstance(df, list) and not df:
+ return
+
# Populate columns idxs
if not self.col_idx:
_df = df[0] if isinstance(df, list) else df
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -736,6 +736,11 @@ def test_hive_partitioned_data(tmpdir, cpu):
seed=42,
).reset_index()
ddf["timestamp"] = ddf["timestamp"].dt.round("D").dt.day
+
+ # Make sure the first partition is empty
+ ddf = ddf[ddf.timestamp > 1]
+
+ # Convert to nvt.Dataset
ds = nvt.Dataset(ddf, engine="parquet")
# Write the dataset to disk
@@ -748,12 +753,14 @@ def test_hive_partitioned_data(tmpdir, cpu):
df_expect = df_expect.sort_values(["id", "x", "y"]).reset_index(drop=True)
timestamp_check = df_expect["timestamp"].iloc[0]
name_check = df_expect["name"].iloc[0]
- assert glob.glob(
+ result_paths = glob.glob(
os.path.join(
path,
f"timestamp={timestamp_check}/name={name_check}/*",
)
)
+ assert result_paths
+ assert all(p.endswith(".parquet") for p in result_paths)
# Read back with dask.dataframe and check the data
df_check = dd.read_parquet(path).compute()
| [BUG] nvt.Dataset(df).to_parquet(tmp, partition_on=col) saves parquet files without .parquet extension
**Describe the bug**
I run this script `nvt.Dataset(df).to_parquet(tmp, partition_on=col)` to save preprocessed dask-cudf or cudf file by partitioning on a given column but this does not save the files with `.parquet` extension. It saves them as `part.0` instead of `part.0.parquet`.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)] Docker
- Method of NVTabular install: [conda, Docker, or from source] Docker- 0.5.3 image with latest nvtabular main branch and also tested with `nvtabular-merlin-pytorch` nightly container.
- If method of install is [Docker], provide `docker pull` & `docker run` commands used
| 2021-07-30T22:06:57 |
|
NVIDIA-Merlin/NVTabular | 1,056 | NVIDIA-Merlin__NVTabular-1056 | [
"1043"
] | a5c1a3fad14e4ec5a2cf8ce5590375c3353bc7e8 | diff --git a/nvtabular/tools/data_gen.py b/nvtabular/tools/data_gen.py
--- a/nvtabular/tools/data_gen.py
+++ b/nvtabular/tools/data_gen.py
@@ -46,10 +46,8 @@ def create_col(self, num_rows, dtype=np.float32, min_val=0, max_val=1):
gamma = 1 - self.alpha
# range 1.0 - 2.0 to avoid using 0, which represents unknown, null, None
ser = cudf.Series(cupy.random.uniform(0.0, 1.0, size=num_rows))
- factor = (cupy.power(max_val, gamma) - cupy.power(min_val, gamma)) + cupy.power(
- min_val, gamma
- )
- ser = ser * factor.item()
+ factor = cupy.power(max_val, gamma) - cupy.power(min_val, gamma)
+ ser = (ser * factor.item()) + cupy.power(min_val, gamma).item()
exp = 1.0 / gamma
ser = ser.pow(exp)
# replace zeroes saved for unknown
| [BUG] Bug in generating data with Power Law in data_gen.py
**Describe the bug**
In the data_gen.py data generation script, the method for generating data using a Power Law distribution seems to be using an incorrect formula, because of which it ignores the minimum value. What the distribution should look like:
Reference: https://mathworld.wolfram.com/RandomNumber.html

Current Implementation:
```
def create_col(self, num_rows, dtype=np.float32, min_val=0, max_val=1):
gamma = 1 - self.alpha
# range 1.0 - 2.0 to avoid using 0, which represents unknown, null, None
ser = cudf.Series(cupy.random.uniform(0.0, 1.0, size=num_rows))
factor = (cupy.power(max_val, gamma) - cupy.power(min_val, gamma)) + cupy.power(
min_val, gamma
)
ser = ser * factor.item()
exp = 1.0 / gamma
ser = ser.pow(exp)
# replace zeroes saved for unknown
# add in nulls if requested
# select indexes
return ser.astype(dtype)
```
Here, the "factor" completely ignores the influence of min_val
What it should look like
```
def create_col(self, num_rows, dtype=np.float32, min_val=0, max_val=1):
gamma = 1 - self.alpha
# range 1.0 - 2.0 to avoid using 0, which represents unknown, null, None
ser = cudf.Series(cupy.random.uniform(0.0, 1.0, size=num_rows))
factor = (cupy.power(max_val, gamma) - cupy.power(min_val, gamma))
ser = (ser * factor.item()) + cupy.power(min_val, gamma).item()
exp = 1.0 / gamma
ser = ser.pow(exp)
# replace zeroes saved for unknown
# add in nulls if requested
# select indexes
return ser.astype(dtype)
```
**Steps/Code to reproduce bug**
Simply generate data using PowerLaw with min_val > 1, and see the distribution and the minimum value generated
Before fix. Simple test:
```
p = datagen.PowerLawDistro(0.1)
col = p.create_col(num_rows=10000000, min_val=5, max_val=1000)
```
In one instance col.min() returns a value 1.6266105e-06. It should be greater than 5.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Environment details (please complete the following information):**
- Environment location: 1xA100-80GB
- Method of NVTabular install: Docker, nvcr.io/nvidia/merlin/merlin-tensorflow-training:0.5.3
**Additional context**
Found this problem while trying to plot the data generated.
| 2021-08-20T20:26:18 |
||
NVIDIA-Merlin/NVTabular | 1,059 | NVIDIA-Merlin__NVTabular-1059 | [
"958"
] | 3a7b9e53cf4e7b16285b0e0a0298994047780912 | diff --git a/nvtabular/dispatch.py b/nvtabular/dispatch.py
--- a/nvtabular/dispatch.py
+++ b/nvtabular/dispatch.py
@@ -68,6 +68,13 @@ def inner2(*args, **kwargs):
DataFrameType = Union[pd.DataFrame]
SeriesType = Union[pd.Series]
+# Define mapping between non-nullable,
+# and nullable types in Pandas
+_PD_NULLABLE_MAP = {
+ "int32": "Int32",
+ "int64": "Int64",
+}
+
class ExtData(enum.Enum):
"""Simple Enum to track external-data types"""
@@ -94,6 +101,17 @@ def _is_dataframe_object(x):
return isinstance(x, (cudf.DataFrame, pd.DataFrame))
+def _nullable_series(data, like_df, dtype):
+ # Return a Series containing the elements in `data`,
+ # with a nullable version of `dtype`, using `like_df`
+ # to infer the Series constructor
+ if isinstance(like_df, pd.DataFrame):
+ # Note that we cannot use "int32"/"int64" to
+ # represent nullable data in pandas
+ return like_df._constructor_sliced(data, dtype=_PD_NULLABLE_MAP.get(str(dtype), dtype))
+ return like_df._constructor_sliced(data, dtype=dtype)
+
+
def _is_series_object(x):
# Simple check if object is a cudf or pandas
# Series object
diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -34,7 +34,7 @@
from pyarrow import parquet as pq
from nvtabular import dispatch
-from nvtabular.dispatch import DataFrameType, annotate
+from nvtabular.dispatch import DataFrameType, _nullable_series, annotate
from nvtabular.ops.internal import ConcatColumns, Identity, SubsetColumns
from nvtabular.worker import fetch_table_data, get_worker_cache
@@ -892,7 +892,7 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
nulls_missing = True
new_cols[col] = _concat(
- [df._constructor_sliced([None], dtype=df[col].dtype), df[col]],
+ [_nullable_series([None], df, df[col].dtype), df[col]],
ignore_index=True,
)
else:
@@ -1079,7 +1079,7 @@ def _encode(
value = type(df)()
for c in selection_r:
typ = df[selection_l[0]].dtype if len(selection_l) == 1 else df[c].dtype
- value[c] = df._constructor_sliced([None], dtype=typ)
+ value[c] = _nullable_series([None], df, typ)
value.index.name = "labels"
value.reset_index(drop=False, inplace=True)
@@ -1089,8 +1089,8 @@ def _encode(
codes["order"] = dispatch._arange(len(codes), like_df=df)
else:
codes = type(df)({"order": dispatch._arange(len(df), like_df=df)}, index=df.index)
- for c in selection_l:
- codes[c] = df[c].copy()
+ for cl, cr in zip(selection_l, selection_r):
+ codes[cl] = df[cl].copy().astype(value[cr].dtype)
if buckets and storage_name in buckets:
na_sentinel = _hash_bucket(df, buckets, selection_l, encode_type=encode_type)
# apply frequency hashing
| [BUG] Movielens 02 ETL error with CPU
When running Notebook: https://github.com/NVIDIA/NVTabular/blob/main/examples/getting-started-movielens/02-ETL-with-NVTabular.ipynb there is an error in Categorify related to: https://github.com/NVIDIA/NVTabular/blob/main/nvtabular/ops/categorify.py#L829
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<timed eval> in <module>
/NVTA/nvtabular/workflow.py in fit(self, dataset)
150 results = [r.result() for r in self.client.compute(stats)]
151 else:
--> 152 results = dask.compute(stats, scheduler="synchronous")[0]
153
154 for computed_stats, op in zip(results, ops):
/usr/local/lib/python3.8/dist-packages/dask/base.py in compute(*args, **kwargs)
565 postcomputes.append(x.__dask_postcompute__())
566
--> 567 results = schedule(dsk, keys, **kwargs)
568 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
569
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
558 """
559 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 560 return get_async(
561 synchronous_executor.submit,
562 synchronous_executor._max_workers,
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_async(submit, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, chunksize, **kwargs)
501 while state["waiting"] or state["ready"] or state["running"]:
502 fire_tasks(chunksize)
--> 503 for key, res_info, failed in queue_get(queue).result():
504 if failed:
505 exc, tb = loads(res_info)
/usr/lib/python3.8/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433
434 self._condition.wait(timeout)
/usr/lib/python3.8/concurrent/futures/_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
/usr/local/lib/python3.8/dist-packages/dask/local.py in submit(self, fn, *args, **kwargs)
543 fut = Future()
544 try:
--> 545 fut.set_result(fn(*args, **kwargs))
546 except BaseException as e:
547 fut.set_exception(e)
/usr/local/lib/python3.8/dist-packages/dask/local.py in batch_execute_tasks(it)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in <listcomp>(.0)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
226 failed = False
227 except BaseException as e:
--> 228 result = pack_exception(e, dumps)
229 failed = True
230 return key, result, failed
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
221 try:
222 task, data = loads(task_info)
--> 223 result = _execute_task(task, data)
224 id = get_id()
225 result = dumps((result, id))
/usr/local/lib/python3.8/dist-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/usr/lib/python3.8/contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
/NVTA/nvtabular/ops/categorify.py in _write_uniques(dfs, base_path, col_group, options)
827 nulls_missing = True
828 new_cols[col] = _concat(
--> 829 [df._constructor_sliced([None], dtype=df[col].dtype), df[col]],
830 ignore_index=True,
831 )
/usr/local/lib/python3.8/dist-packages/pandas/core/series.py in __init__(self, data, index, dtype, name, copy, fastpath)
325 data = data.copy()
326 else:
--> 327 data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
328
329 data = SingleBlockManager.from_array(data, index)
/usr/local/lib/python3.8/dist-packages/pandas/core/construction.py in sanitize_array(data, index, dtype, copy, raise_cast_failure)
445
446 if dtype is not None:
--> 447 subarr = _try_cast(data, dtype, copy, raise_cast_failure)
448 else:
449 subarr = maybe_convert_platform(data)
/usr/local/lib/python3.8/dist-packages/pandas/core/construction.py in _try_cast(arr, dtype, copy, raise_cast_failure)
553 if is_integer_dtype(dtype):
554 # this will raise if we have e.g. floats
--> 555 maybe_cast_to_integer_array(arr, dtype)
556 subarr = arr
557 else:
/usr/local/lib/python3.8/dist-packages/pandas/core/dtypes/cast.py in maybe_cast_to_integer_array(arr, dtype, copy)
1672 try:
1673 if not hasattr(arr, "astype"):
-> 1674 casted = np.array(arr, dtype=dtype, copy=copy)
1675 else:
1676 casted = arr.astype(dtype, copy=copy)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
```
| any ideas @benfred @rjzamora ?
@jperez999 were you looking into anything similar?
Can you verify the version of dask you're running? We need to be using 2021.4.1 here - we've seen this issue with newer versions of dask
Solved with our latest container using dask and distributed 2021.04.1
Still getting errors with latest containers
It seems this problem is related to nullable-integer types in Pandas. Since cudf allows null values in its `"int32"`/`"int64"` columns, there are many places in NVTabular where we require/expect the data to be cast to these types. More recent versions of Pandas throw errors when you try to create an `"int32"`/`"int64"` `Series`/column when null values are present. | 2021-08-23T18:00:36 |
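A minimal stand-alone illustration of the Pandas behavior described in the comment above (the exact exception type and message can vary between Pandas versions):
```python
import pandas as pd

# A plain numpy-backed "int64" Series cannot hold missing values,
# so constructing one from data that contains None raises.
try:
    pd.Series([None, 1, 2], dtype="int64")
except (TypeError, ValueError) as exc:
    print("int64 failed:", exc)

# The nullable extension dtype "Int64" (capital I) accepts nulls,
# which is what the _PD_NULLABLE_MAP in the patch above relies on.
print(pd.Series([None, 1, 2], dtype="Int64"))
```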
|
NVIDIA-Merlin/NVTabular | 1,132 | NVIDIA-Merlin__NVTabular-1132 | [
"1128"
] | f7af37cb107e92b3bebe4d612d5c413b34daa6c6 | diff --git a/nvtabular/dispatch.py b/nvtabular/dispatch.py
--- a/nvtabular/dispatch.py
+++ b/nvtabular/dispatch.py
@@ -379,9 +379,8 @@ def _make_df(_like_df=None, device=None):
return pd.DataFrame(_like_df)
elif isinstance(_like_df, (cudf.DataFrame, cudf.Series)):
return cudf.DataFrame(_like_df)
- elif isinstance(_like_df, dict) and len(_like_df) > 0:
+ elif device is None and isinstance(_like_df, dict) and len(_like_df) > 0:
is_pandas = all(isinstance(v, pd.Series) for v in _like_df.values())
-
return pd.DataFrame(_like_df) if is_pandas else cudf.DataFrame(_like_df)
if device == "cpu":
return pd.DataFrame(_like_df)
diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -981,6 +981,12 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
[_nullable_series([None], df, df[col].dtype), df[col]],
ignore_index=True,
)
+ if name_count in df:
+ new_cols[name_count] = _concat(
+ [_nullable_series([0], df, df[name_count].dtype), df[name_count]],
+ ignore_index=True,
+ )
+
else:
# ensure None aka "unknown" stays at index 0
if name_count in df:
@@ -988,8 +994,9 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
df_1 = df.iloc[1:].sort_values(name_count, ascending=False, ignore_index=True)
df = _concat([df_0, df_1])
new_cols[col] = df[col].copy(deep=False)
- if name_count in df:
- new_cols[name_count] = df[name_count].copy(deep=False)
+
+ if name_count in df:
+ new_cols[name_count] = df[name_count].copy(deep=False)
if nulls_missing:
df = type(df)(new_cols)
df.to_parquet(path, index=False, compression=None)
@@ -998,6 +1005,7 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
for c in col_selector.names:
df_null[c] = df_null[c].astype(df[c].dtype)
df_null.to_parquet(path, index=False, compression=None)
+
del df
return path
| diff --git a/tests/unit/ops/test_ops.py b/tests/unit/ops/test_ops.py
--- a/tests/unit/ops/test_ops.py
+++ b/tests/unit/ops/test_ops.py
@@ -15,6 +15,8 @@
#
import copy
import math
+import os
+import random
import string
import dask.dataframe as dd
@@ -469,6 +471,50 @@ def test_lambdaop_misalign(cpu):
)
[email protected]("cpu", _CPU)
[email protected]("include_nulls", [True, False])
+def test_categorify_counts(tmpdir, cpu, include_nulls):
+ num_rows = 50
+ num_distinct = 10
+
+ possible_session_ids = list(range(num_distinct))
+ if include_nulls:
+ possible_session_ids.append(None)
+
+ df = dispatch._make_df(
+ {"session_id": [random.choice(possible_session_ids) for _ in range(num_rows)]},
+ device="cpu" if cpu else None,
+ )
+
+ cat_features = ["session_id"] >> nvt.ops.Categorify(out_path=str(tmpdir))
+ workflow = nvt.Workflow(cat_features)
+ workflow.fit_transform(nvt.Dataset(df, cpu=cpu)).to_ddf().compute()
+
+ vals = df["session_id"].value_counts()
+ vocab = dispatch._read_dispatch(cpu=cpu)(
+ os.path.join(tmpdir, "categories", "unique.session_id.parquet")
+ )
+
+ if cpu:
+ expected = dict(zip(vals.index, vals))
+ computed = {
+ session: count
+ for session, count in zip(vocab["session_id"], vocab["session_id_count"])
+ if count
+ }
+ else:
+ expected = dict(zip(vals.index.values_host, vals.values_host))
+ computed = {
+ session: count
+ for session, count in zip(
+ vocab["session_id"].values_host, vocab["session_id_count"].values_host
+ )
+ if count
+ }
+
+ assert computed == expected
+
+
@pytest.mark.parametrize("freq_threshold", [0, 1, 2])
@pytest.mark.parametrize("cpu", _CPU)
@pytest.mark.parametrize("dtype", [None, np.int32, np.int64])
| [BUG] Unique parquet files are not accurately generated after Categorify() op
**Describe the bug**
After the Categorify() op, when I check the unique parquet files, I see that the original id column and the count column in the parquet file do not match. Somehow an existing element in the categorical column is encoded to `NA`, and the last element's value_count also appears as `NA`, meaning the first column is somehow shifted by 1 row, which creates a mismatch between the two columns. See screenshot below.

**Steps/Code to reproduce bug**
Run the code below to reproduce the issue:
```
NUM_ROWS = 100000
long_tailed_item_distribution = np.clip(np.random.lognormal(3., 1., NUM_ROWS).astype(np.int32), 1, 50000)
# generate random item interaction features
df = pd.DataFrame(np.random.randint(1, 20000, NUM_ROWS), columns=['session_id'])
df['item_id'] = long_tailed_item_distribution
# generate category mapping for each item-id
df['category'] = pd.cut(df['item_id'], bins=334, labels=np.arange(1, 335)).astype(np.int32)
df['timestamp/age_days'] = np.random.uniform(0, 1, NUM_ROWS)
df['timestamp/weekday/sin']= np.random.uniform(0, 1, NUM_ROWS)
# generate day mapping for each session
map_day = dict(zip(df.session_id.unique(), np.random.randint(1, 10, size=(df.session_id.nunique()))))
df['day'] = df.session_id.map(map_day)
categ_feats = ['session_id', 'item_id', 'category'] >> nvt.ops.Categorify()
workflow = nvt.Workflow(categ_feats)
dataset = nvt.Dataset(df, cpu=False)
workflow.fit(dataset)
gdf = workflow.transform(dataset).to_ddf().compute()
unique_sessionid=cudf.read_parquet('./categories/unique.session_id.parquet')
print(unique_sessionid)
```
**Expected behavior**
Correct encoding is expected.
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)]
- Method of NVTabular install: [conda, Docker, or from source]: NVT 0.6 Pytorch Docker latest main branch is pulled.
- If method of install is [Docker], provide `docker pull` & `docker run` commands used
| Same reproduction - reducing the number of rows and getting rid of the unneeded columns:
```
import numpy as np
import pandas as pd
import nvtabular as nvt
import cudf
NUM_ROWS = 50
# generate random item interaction features
df = pd.DataFrame(np.random.randint(1, NUM_ROWS//4, NUM_ROWS), columns=['session_id'])
categ_feats = ['session_id'] >> nvt.ops.Categorify()
workflow = nvt.Workflow(categ_feats)
dataset = nvt.Dataset(df, cpu=False)
workflow.fit(dataset)
gdf = workflow.transform(dataset).to_ddf().compute()
unique_sessionid=cudf.read_parquet('./categories/unique.session_id.parquet')
print("got")
print(unique_sessionid)
print("expected")
print(df["session_id"].value_counts())
```
This prints out:
```python
got
session_id session_id_count
0 <NA> 7
1 4 6
2 2 6
3 3 6
4 7 5
5 6 5
6 9 4
7 5 4
8 8 4
9 10 3
10 11 <NA>
expected
4 7
7 6
2 6
3 6
9 5
6 5
8 4
5 4
10 4
11 3
Name: session_id, dtype: int64
```
Showing that session '11' should have a value count of 3, but is instead getting NA. | 2021-09-19T23:50:53 |
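As a toy illustration, the patch above handles this in `_write_uniques` by keeping the null/"unknown" row pinned at index 0 and sorting only the remaining rows by their counts; a rough pandas version of that pattern (values made up):
```python
import pandas as pd

vocab = pd.DataFrame({"session_id": [None, 4, 2, 11],
                      "session_id_count": [7, 7, 6, 3]})

# Row 0 is the null/"unknown" entry and must stay at index 0;
# only the remaining rows are sorted by their counts.
head = vocab.iloc[0:1]
tail = vocab.iloc[1:].sort_values("session_id_count", ascending=False,
                                  ignore_index=True)
print(pd.concat([head, tail], ignore_index=True))
```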
NVIDIA-Merlin/NVTabular | 1,139 | NVIDIA-Merlin__NVTabular-1139 | [
"1097"
] | 6ab864e39081c07a6873eed2277974d71c586dd0 | diff --git a/nvtabular/workflow/workflow.py b/nvtabular/workflow/workflow.py
--- a/nvtabular/workflow/workflow.py
+++ b/nvtabular/workflow/workflow.py
@@ -433,6 +433,7 @@ def _transform_ddf(ddf, workflow_nodes, meta=None, additional_columns=None):
workflow_nodes,
additional_columns=additional_columns,
meta=meta,
+ enforce_metadata=False,
)
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -20,8 +20,10 @@
import cudf
import dask_cudf
+import pandas as pd
import pytest
from dask.dataframe import assert_eq
+from dask.dataframe import from_pandas as dd_from_pandas
from dask.dataframe import read_parquet as dd_read_parquet
from nvtabular import ColumnSelector, Dataset, Workflow, ops
@@ -276,3 +278,18 @@ def test_dask_preproc_cpu(client, tmpdir, datasets, engine, shuffle, cpu):
df_disk.sort_values(["id", "x"])[["name-string", "label"]],
check_index=False,
)
+
+
[email protected]("cpu", [None, True])
+def test_filtered_partition(tmpdir, cpu):
+ # Toy DataFrame example
+ df = pd.DataFrame({"col": range(100)})
+ ddf = dd_from_pandas(df, npartitions=5)
+ dataset = Dataset(ddf, cpu=cpu)
+
+ # Workflow
+ filtered = ["col"] >> ops.Filter(lambda df: df["col"] < 75)
+ workflow = Workflow(filtered)
+
+ # Write result to disk
+ workflow.transform(dataset).to_parquet(str(tmpdir))
| [BUG] Problem on writing to_parquet after transforming
```python
#######################################
transforming Code:
class Processjson(Operator):
def transform(self, columns, gdf):
col = gdf['event_properties']
gdf['item_id'] = col.str.extract('\'product_id\'\s*:\s*\'([^\']+)\'')
gdf['event_time'] = (gdf['event_time'] - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
gdf['device_brand'] = gdf['device_brand'].fillna('Apple')
return gdf
def output_column_names(self, columns):
return [i for i in columns if (i != 'event_properties')] + ['item_id']
def dependencies(self):
return None
filtered = COLUMNS >> nvt.ops.Filter(lambda df: df['event_type'].isin(['Value']))
filtered = filtered >> nvt.ops.JoinExternal(df_ext=fid_map,on='user_id',columns_ext=['user_id','memberID'])
filtered = filtered>>Processjson() >> nvt.ops.Dropna()
workflow = nvt.Workflow(filtered)
dataset_file = glob.glob('raw/*')
subdataset_file = dataset_file[6:8]
dataset = nvt.Dataset(subdataset_file, part_size="500MB")
workflow.transform(dataset).to_parquet(f'processed/test')
############################################
```
I followed the example and edited it to serve my data.
When I set multiple files as input for the Dataset class, there are some specific files for which this exception is thrown when loaded.
It might be because there is no handling for the case where some files have data and some have no data after the filtering.
This doesn't happen if I load a single file and process it separately.
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-60-2a116bd489a4> in <module>
2 # for i in dataset_file:
3 dataset = nvt.Dataset(subdataset_file, part_size="500MB")
----> 4 workflow.transform(dataset).to_parquet(f'processed/test')
5
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dataset.py in to_parquet(self, output_path, shuffle, preserve_files, output_files, out_files_per_proc, num_threads, dtypes, cats, conts, labels, suffix, partition_on)
763
764 # Output dask_cudf DataFrame to dataset
--> 765 _ddf_to_dataset(
766 ddf,
767 fs,
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dask.py in _ddf_to_dataset(ddf, fs, output_path, shuffle, file_partition_map, out_files_per_proc, cat_names, cont_names, label_names, output_format, client, num_threads, cpu, suffix, partition_on)
364 out = client.compute(out).result()
365 else:
--> 366 out = dask.compute(out, scheduler="synchronous")[0]
367
368 if cached_writers:
/usr/local/lib/python3.8/dist-packages/dask/base.py in compute(*args, **kwargs)
564 postcomputes.append(x.__dask_postcompute__())
565
--> 566 results = schedule(dsk, keys, **kwargs)
567 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
568
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_sync(dsk, keys, **kwargs)
558 """
559 kwargs.pop("num_workers", None) # if num_workers present, remove it
--> 560 return get_async(
561 synchronous_executor.submit,
562 synchronous_executor._max_workers,
/usr/local/lib/python3.8/dist-packages/dask/local.py in get_async(submit, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, chunksize, **kwargs)
501 while state["waiting"] or state["ready"] or state["running"]:
502 fire_tasks(chunksize)
--> 503 for key, res_info, failed in queue_get(queue).result():
504 if failed:
505 exc, tb = loads(res_info)
/usr/lib/python3.8/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433
434 self._condition.wait(timeout)
/usr/lib/python3.8/concurrent/futures/_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
/usr/local/lib/python3.8/dist-packages/dask/local.py in submit(self, fn, *args, **kwargs)
543 fut = Future()
544 try:
--> 545 fut.set_result(fn(*args, **kwargs))
546 except BaseException as e:
547 fut.set_exception(e)
/usr/local/lib/python3.8/dist-packages/dask/local.py in batch_execute_tasks(it)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in <listcomp>(.0)
235 Batch computing of multiple tasks with `execute_task`
236 """
--> 237 return [execute_task(*a) for a in it]
238
239
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
226 failed = False
227 except BaseException as e:
--> 228 result = pack_exception(e, dumps)
229 failed = True
230 return key, result, failed
/usr/local/lib/python3.8/dist-packages/dask/local.py in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
221 try:
222 task, data = loads(task_info)
--> 223 result = _execute_task(task, data)
224 id = get_id()
225 result = dumps((result, id))
/usr/local/lib/python3.8/dist-packages/dask/core.py in _execute_task(arg, cache, dsk)
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/usr/lib/python3.8/contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
/usr/local/lib/python3.8/dist-packages/nvtabular/io/dask.py in _write_output_partition(df, processed_path, shuffle, out_files_per_proc, fs, cat_names, cont_names, label_names, output_format, num_threads, cpu, suffix)
92
93 # Add data
---> 94 writer.add_data(df)
95
96 return df_size
/usr/lib/python3.8/contextlib.py in inner(*args, **kwds)
73 def inner(*args, **kwds):
74 with self._recreate_cm():
---> 75 return func(*args, **kwds)
76 return inner
77
/usr/local/lib/python3.8/dist-packages/nvtabular/io/writer.py in add_data(self, df)
137 # Only writing to a single file. No need to
138 # scatter or slice the data before writing
--> 139 self._add_single_file(df)
140 else:
141 # Use different mechanism to decompose and write each df
/usr/local/lib/python3.8/dist-packages/nvtabular/io/writer.py in _add_single_file(self, df)
224 self.queue.put((0, df))
225 else:
--> 226 self._write_table(0, df)
227
228 def package_general_metadata(self):
/usr/local/lib/python3.8/dist-packages/nvtabular/io/parquet.py in _write_table(self, idx, data)
788 def _write_table(self, idx, data):
789 writer = self._get_or_create_writer(idx)
--> 790 writer.write_table(data)
791
792 @classmethod
cudf/_lib/parquet.pyx in cudf._lib.parquet.ParquetWriter.write_table()
RuntimeError: cuDF failure at: /workspace/build-env/cpp/src/io/parquet/writer_impl.cu:462: Leaf column's corresponding metadata cannot have children
| The bug occurs when NVTabular iterates over the partitions of the dataset and the DropNA operator produces an empty dataframe (all rows are filtered out due to missing values).
A quick fix is to increase the partition size or merge the data so that no partition ends up as an empty dataframe.
We need to handle that edge case when writing the file to disk
@thanatdam (and maybe @bschifferer) - If possible, could you share a self-contained reproducer (including some dummy input data to produce the error)? I am having trouble reproducing locally.
Also, can you share the version of NVTabular that you are using? The traceback makes it look like the **old** `method="worker"` algorithm is being used for the write.
I wrote this using NVT 0.5.3, but this also happens with 0.6.
As for the sample data and code, I already provided that to Ettikan and @bschifferer via email, since it contains corporate data.
@rjzamora @thanatdam thanks for sharing. I provided my own toy example, which reproduces the same error.
@bschifferer shared a simple toy reproducer with me offline, and it seems that the error is caused by the dtypes of an empty partition being converted to `float64` in Dask's `map_partitions` call. The problem is not that we are trying to write an empty partition, but that the empty partition has an inconsistent dtype, and this makes the parquet writer unhappy.
As far as I can tell, we should be able to use `ddf.map_partitions(..., enforce_metadata=False)` in cases where we already know `ddf._meta` is wrong. With `enforce_metadata=False`, this error should be avoided. | 2021-09-22T22:11:02 |
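A minimal sketch of the `enforce_metadata=False` behavior mentioned above, mirroring the toy case used in the test added by this PR (illustrative dask usage, not the NVTabular code itself):
```python
import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"col": range(100)}), npartitions=5)

def keep_small(df):
    # A filter (like Filter/Dropna above) can leave some partitions empty.
    return df[df["col"] < 75]

# enforce_metadata=False tells dask not to check/cast each partition's
# output against the inferred meta -- the workaround suggested above
# for cases where the meta is known to be wrong.
out = ddf.map_partitions(keep_small, enforce_metadata=False)
print(out.compute())
```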
NVIDIA-Merlin/NVTabular | 1,209 | NVIDIA-Merlin__NVTabular-1209 | [
"1153"
] | 70dce1c5a1205545ee52010bd90a2921c05b0920 | diff --git a/nvtabular/dispatch.py b/nvtabular/dispatch.py
--- a/nvtabular/dispatch.py
+++ b/nvtabular/dispatch.py
@@ -242,14 +242,6 @@ def _hash_series(s):
return s.hash_values()
-def _natural_log(df):
- """Natural logarithm of all columns in a DataFrame"""
- if isinstance(df, pd.DataFrame):
- return pd.DataFrame(np.log(df.values), columns=df.columns, index=df.index)
- else:
- return df.log()
-
-
def _series_has_nulls(s):
"""Check if Series contains any null values"""
if isinstance(s, pd.Series):
@@ -285,12 +277,21 @@ def _is_string_dtype(obj):
return is_string_dtype(obj)
+def _flatten_list_column_values(s):
+ """returns a flattened list from a list column"""
+ if isinstance(s, pd.Series) or not cudf:
+ return pd.Series(itertools.chain(*s))
+ else:
+ return s.list.leaves
+
+
def _flatten_list_column(s):
- """Flatten elements of a list-based column"""
+ """Flatten elements of a list-based column, and return as a DataFrame"""
+ values = _flatten_list_column_values(s)
if isinstance(s, pd.Series) or not cudf:
- return pd.DataFrame({s.name: itertools.chain(*s)})
+ return pd.DataFrame({s.name: values})
else:
- return cudf.DataFrame({s.name: s.list.leaves})
+ return cudf.DataFrame({s.name: values})
def _concat_columns(args: list):
diff --git a/nvtabular/ops/logop.py b/nvtabular/ops/logop.py
--- a/nvtabular/ops/logop.py
+++ b/nvtabular/ops/logop.py
@@ -15,7 +15,13 @@
#
import numpy as np
-from nvtabular.dispatch import DataFrameType, _natural_log, annotate
+from nvtabular.dispatch import (
+ DataFrameType,
+ _encode_list_column,
+ _flatten_list_column_values,
+ _is_list_dtype,
+ annotate,
+)
from ..tags import Tags
from .operator import ColumnSelector, Operator
@@ -36,7 +42,14 @@ class LogOp(Operator):
@annotate("LogOp_op", color="darkgreen", domain="nvt_python")
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
- return _natural_log(df[col_selector.names].astype(np.float32) + 1)
+ for name in col_selector.names:
+ column = df[name]
+ if _is_list_dtype(column):
+ transformed = np.log(_flatten_list_column_values(column).astype(np.float32) + 1)
+ df[name] = _encode_list_column(column, transformed)
+ else:
+ df[name] = np.log(column.astype(np.float32) + 1)
+ return df
def output_tags(self):
return [Tags.CONTINUOUS]
| diff --git a/tests/unit/ops/test_ops.py b/tests/unit/ops/test_ops.py
--- a/tests/unit/ops/test_ops.py
+++ b/tests/unit/ops/test_ops.py
@@ -51,6 +51,21 @@ def test_log(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns, cpu):
assert_eq(values, np.log(original.astype(np.float32) + 1))
[email protected]("cpu", _CPU)
+def test_logop_lists(tmpdir, cpu):
+ df = dispatch._make_df(device="cpu" if cpu else "gpu")
+ df["vals"] = [[np.exp(0) - 1, np.exp(1) - 1], [np.exp(2) - 1], []]
+
+ features = ["vals"] >> nvt.ops.LogOp()
+ workflow = nvt.Workflow(features)
+ new_df = workflow.fit_transform(nvt.Dataset(df)).to_ddf().compute()
+
+ expected = dispatch._make_df(device="cpu" if cpu else "gpu")
+ expected["vals"] = [[0.0, 1.0], [2.0], []]
+
+ assert_eq(expected, new_df)
+
+
def test_valuecount(tmpdir):
df = dispatch._make_df(
{
| [FEA] LogOp for list features
**Is your feature request related to a problem? Please describe.**
As a user, I want to be able to use the LogOp on columns with list values.
**Describe the solution you'd like**
Example dataset:
```
df = cudf.DataFrame({'userid': ['a', 'b', 'c'], 'col': [[10,100,1000], [10,5], [50,10]]})
log_col = ['col'] >> nvt.ops.LogOp()
```
Log Operation should be applied to every value in the list.
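A rough pandas sketch of the flatten → transform → regroup idea (illustrative only, not the NVTabular API; the patch above uses the `_flatten_list_column_values` / `_encode_list_column` dispatch helpers for this):
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"userid": ["a", "b", "c"],
                   "col": [[10, 100, 1000], [10, 5], [50, 10]]})

# Flatten all list elements, apply log(x + 1), then rebuild the lists
# with the original per-row lengths.
lengths = df["col"].map(len).to_list()
flat = np.log1p(np.concatenate(df["col"].to_list()).astype("float32"))
offsets = np.cumsum([0] + lengths)
df["col"] = [flat[offsets[i]:offsets[i + 1]].tolist() for i in range(len(df))]
print(df)
```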
| 2021-10-22T21:37:20 |
|
NVIDIA-Merlin/NVTabular | 1,210 | NVIDIA-Merlin__NVTabular-1210 | [
"1154"
] | a17c6adf10cbf3a31f1b00a618d6bb91f9775526 | diff --git a/nvtabular/ops/moments.py b/nvtabular/ops/moments.py
--- a/nvtabular/ops/moments.py
+++ b/nvtabular/ops/moments.py
@@ -22,6 +22,8 @@
from dask.delayed import Delayed
from dask.highlevelgraph import HighLevelGraph
+from nvtabular.dispatch import _flatten_list_column_values, _is_list_dtype
+
def _custom_moments(ddf, split_every=32):
@@ -62,23 +64,24 @@ def _custom_moments(ddf, split_every=32):
def _chunkwise_moments(df):
- df2 = type(df)()
- for col in df.columns:
- df2[col] = df[col].astype("float64").pow(2)
- vals = {
- "df-count": df.count().to_frame().transpose(),
- "df-sum": df.sum().astype("float64").to_frame().transpose(),
- "df2-sum": df2.sum().to_frame().transpose(),
- }
+ vals = {name: type(df)() for name in ["count", "sum", "squaredsum"]}
+ for name in df.columns:
+ column = df[name]
+ if _is_list_dtype(column):
+ column = _flatten_list_column_values(column)
+
+ vals["count"][name] = [column.count()]
+ vals["sum"][name] = [column.sum().astype("float64")]
+ vals["squaredsum"][name] = [column.astype("float64").pow(2).sum()]
+
# NOTE: Perhaps we should convert to pandas here
# (since we know the results should be small)?
- del df2
return vals
def _tree_node_moments(inputs):
out = {}
- for val in ["df-count", "df-sum", "df2-sum"]:
+ for val in ["count", "sum", "squaredsum"]:
df_list = [x.get(val, None) for x in inputs]
df_list = [df for df in df_list if df is not None]
out[val] = _concat(df_list, ignore_index=True).sum().to_frame().transpose()
@@ -86,9 +89,9 @@ def _tree_node_moments(inputs):
def _finalize_moments(inp, ddof=1):
- n = inp["df-count"].iloc[0]
- x = inp["df-sum"].iloc[0]
- x2 = inp["df2-sum"].iloc[0]
+ n = inp["count"].iloc[0]
+ x = inp["sum"].iloc[0]
+ x2 = inp["squaredsum"].iloc[0]
if hasattr(n, "to_pandas"):
n = n.to_pandas()
x = x.to_pandas()
@@ -105,7 +108,7 @@ def _finalize_moments(inp, ddof=1):
var[(n - ddof) == 0] = np.nan
# Construct output DataFrame
- out = pd.DataFrame(index=inp["df-count"].columns)
+ out = pd.DataFrame(index=inp["count"].columns)
out["count"] = n
out["sum"] = x
out["sum2"] = x2
diff --git a/nvtabular/ops/normalize.py b/nvtabular/ops/normalize.py
--- a/nvtabular/ops/normalize.py
+++ b/nvtabular/ops/normalize.py
@@ -16,7 +16,13 @@
import dask.dataframe as dd
import numpy
-from ..dispatch import DataFrameType, annotate
+from ..dispatch import (
+ DataFrameType,
+ _encode_list_column,
+ _flatten_list_column_values,
+ _is_list_dtype,
+ annotate,
+)
from ..tags import Tags
from .moments import _custom_moments
from .operator import ColumnSelector, Operator, Supports
@@ -58,11 +64,22 @@ def fit_finalize(self, dask_stats):
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
new_df = type(df)()
for name in col_selector.names:
+ values = df[name]
+ list_col = _is_list_dtype(values)
+ if list_col:
+ values = _flatten_list_column_values(values)
+
if self.stds[name] > 0:
- new_df[name] = (df[name] - self.means[name]) / (self.stds[name])
+ values = (values - self.means[name]) / (self.stds[name])
else:
- new_df[name] = df[name] - self.means[name]
- new_df[name] = new_df[name].astype("float32")
+ values = values - self.means[name]
+
+ values = values.astype("float32")
+
+ if list_col:
+ values = _encode_list_column(df[name], values)
+
+ new_df[name] = values
return new_df
@property
| diff --git a/tests/unit/ops/test_normalize.py b/tests/unit/ops/test_normalize.py
--- a/tests/unit/ops/test_normalize.py
+++ b/tests/unit/ops/test_normalize.py
@@ -22,7 +22,8 @@
import nvtabular as nvt
import nvtabular.io
from nvtabular import ColumnSelector, dispatch, ops
-from nvtabular.dispatch import HAS_GPU
+from nvtabular.dispatch import HAS_GPU, _flatten_list_column, _flatten_list_column_values
+from tests.conftest import assert_eq
if HAS_GPU:
_CPU = [True, False]
@@ -83,6 +84,29 @@ def test_normalize(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
assert np.allclose(cupy_outputs[col], new_gdf[col].values)
[email protected]("cpu", _CPU)
+def test_normalize_lists(tmpdir, cpu):
+ df = dispatch._make_df(device="cpu" if cpu else "gpu")
+ df["vals"] = [
+ [0.0, 1.0, 2.0],
+ [
+ 3.0,
+ 4.0,
+ ],
+ [5.0],
+ ]
+
+ features = ["vals"] >> nvt.ops.Normalize()
+ workflow = nvt.Workflow(features)
+ transformed = workflow.fit_transform(nvt.Dataset(df)).to_ddf().compute()
+
+ expected = _flatten_list_column_values(df["vals"]).astype("float32")
+ expected = (expected - expected.mean()) / expected.std()
+ expected_df = type(transformed)({"vals": expected})
+
+ assert_eq(expected_df, _flatten_list_column(transformed["vals"]))
+
+
@pytest.mark.parametrize("cpu", _CPU)
def test_normalize_std_zero(cpu):
df = pd.DataFrame({"a": 7 * [10]})
| [FEA] Normalize for list
**Is your feature request related to a problem? Please describe.**
As a user, I want to be able to use the Normalize on columns with list values.
**Describe the solution you'd like**
Example dataset:
```
df = cudf.DataFrame({'userid': ['a', 'b', 'c'], 'col': [[10,100,1000], [10,5], [50,10]]})
normalize_col = ['col'] >> nvt.ops.Normalize()
```
The Normalize op will collect the mean/std over all values in the column `col`, when calling fit. It does not depend on the position in the list.
The normalize op will transform the column `col` and the lists contain normalized values.
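A toy pandas sketch of the fit/transform split described above (illustrative only; the column handling is simplified compared to the actual op):
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"col": [[10, 100, 1000], [10, 5], [50, 10]]})

# "fit": collect mean/std over every list element, regardless of position
flat = np.concatenate(df["col"].to_list()).astype("float64")
mean, std = flat.mean(), flat.std(ddof=1)

# "transform": normalize each element inside the lists
df["col"] = df["col"].map(lambda xs: [(x - mean) / std for x in xs])
print(df)
```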
| 2021-10-25T00:00:22 |
|
NVIDIA-Merlin/NVTabular | 1,213 | NVIDIA-Merlin__NVTabular-1213 | [
"1155"
] | 5c85dfdee22edd1cd70f0c657f228f51f83e128d | diff --git a/nvtabular/io/fsspec_utils.py b/nvtabular/io/fsspec_utils.py
--- a/nvtabular/io/fsspec_utils.py
+++ b/nvtabular/io/fsspec_utils.py
@@ -179,13 +179,20 @@ def _get_parquet_byte_ranges(
for c in range(row_group.num_columns):
column = row_group.column(c)
name = column.path_in_schema
- # Skip this column if we are targeting a
- # specific columns
- if columns is None or name in columns:
+ # Skip this column if we are targeting
+ # specific columns, and this name is not
+ # in the list.
+ #
+ # Note that `column.path_in_schema` may
+ # modify the column name for list and struct
+ # columns. For example, a column named "a"
+ # may become "a.list.element"
+ split_name = name.split(".")[0]
+ if columns is None or name in columns or split_name in columns:
file_offset0 = column.dictionary_page_offset
if file_offset0 is None:
file_offset0 = column.data_page_offset
- num_bytes = column.total_uncompressed_size
+ num_bytes = column.total_compressed_size
byte_ranges.append((file_offset0, num_bytes))
return byte_ranges, footer_sample, file_size
| [BUG] Reading data from GCS creates issue
**Describe the bug**
Reading parquet file from Google Cloud Storage does not work.
**Steps/Code to reproduce bug**
```
dataset = nvt.Dataset("gs://bucket/file.parquet")
dataset.to_ddf().head()
```
Error:
```
cuDF failure at: ../src/table/table.cpp:42: Column size mismatch:
```
If the data is copied to the local disk, the code will work.
cuDF / dask_cudf can read from GCS.
This is with the latest NVTabular
| @bschifferer - I am having trouble reproducing this. However, I did run into another error related to the local buffer needing to be wrapped in a BytesIO object in order to interact with pyarrow correctly. Perhaps the errors are related?
When you say "cuDF / dask_cudf can read from GCS", are you referring to the latest version of RAPIDS, or 21.08?
@rjzamora
As @bschifferer said, this fails
```
dataset = nvt.Dataset("gs://bucket/file.parquet")
dataset.to_ddf().head()
```
but this seems to work fine (though it feels a lot slower)
```
ddf = dask_cudf.read_parquet("/path-to-data/*.parquet")
nvt.Dataset(ddf)
```
So this still doesn't work with the latest development branch (`main`)? Is the problematic file in a public bucket? If not, can you share a toy DataFrame example that cannot be read back like this after being written to gcs?
I haven't tried with the latest `main` branch. I'll check if #1158 fixed the issue and provide an update.
>I haven't tried with the latest main branch. I'll check if #1158 fixed the issue and provide an update.
Thanks @pchandar! Note that I am not very confident that #1158 was related, but since I cannot reproduce with an arbitrary parquet file myself, this issue is a bit difficult to debug.
@rjzamora sorry it took a while. It was a bit tricky to reproduce this on a test dataset. But if you copy the transformed parquet from [this](https://github.com/NVIDIA-Merlin/NVTabular/blob/main/examples/getting-started-movielens/02-ETL-with-NVTabular.ipynb) (Cell 18) example to a GCS bucket and then
```
ds = nvt.Dataset("gs://bucket/movielens.parquet")
ds.head()
```
will give the following error
```
RuntimeError: cuDF failure at: ../src/table/table.cpp:42: Column size mismatch: 76 != 20000076
```
A couple of observations: (1) this seems to happen only when the list column exists; and (2) it only happens for sufficiently large datasets (when I tried slicing the problematic dataset, it seemed to work fine). Hope this helps reproduce the error at your end.
Thanks
I'm sorry for the delay here. This is indeed a bug in the optimized data-transfer logic for read_parquet from remote storage. It turns out that the list column name is modified from "genres" to "genres.list.element" in the parquet metadata, and so we fail to transfer the data for that column. In the near future, [all this logic will live directly in fsspec](https://github.com/intake/filesystem_spec/pull/806) (and will be removed from NVTabular), but I will submit a temporary fix asap for NVT. | 2021-10-26T17:21:41 |
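A small illustrative pyarrow snippet showing the renaming described above (the file name is made up; the exact leaf name — `a.list.element` vs `a.list.item` — depends on the writer and version):
```python
import pandas as pd
import pyarrow.parquet as pq

pd.DataFrame({"a": [[1, 2], [3]]}).to_parquet("list_example.parquet")

md = pq.ParquetFile("list_example.parquet").metadata
col = md.row_group(0).column(0)
# For a list column named "a", path_in_schema is the full leaf path
# (e.g. "a.list.element"), not plain "a" -- which is why a simple
# `name in columns` check skips it, as the fix above explains.
print(col.path_in_schema, col.total_compressed_size)
```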
|
NVIDIA-Merlin/NVTabular | 1,223 | NVIDIA-Merlin__NVTabular-1223 | [
"1222"
] | 634cfb754fd9885b2772445d48e634eb47f711ea | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -668,6 +668,7 @@ def to_parquet(
suffix=".parquet",
partition_on=None,
method="subgraph",
+ write_hugectr_keyset=False,
):
"""Writes out to a parquet dataset
@@ -742,6 +743,10 @@ def to_parquet(
a single large task). In some cases, it may be more ideal to prioritize
concurrency. In that case, a worker-based approach can be used by
specifying `method="worker"`.
+ write_hugectr_keyset : bool, optional
+ Whether to write a HugeCTR keyset output file ("_hugectr.keyset").
+ Writing this file can be very slow, and should only be done if you
+ are planning to ingest the output data with HugeCTR. Default is False.
"""
if partition_on:
@@ -905,7 +910,7 @@ def to_parquet(
self.cpu,
suffix=suffix,
partition_on=partition_on,
- schema=self.schema,
+ schema=self.schema if write_hugectr_keyset else None,
)
def to_hugectr(
diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -685,7 +685,6 @@ class FitOptions:
max_size: Optional[Union[int, dict]] = None
num_buckets: Optional[Union[int, dict]] = None
start_index: int = 0
- dataset_size: int = 0
def __post_init__(self):
if not isinstance(self.col_groups, ColumnSelector):
@@ -742,7 +741,16 @@ def _top_level_groupby(df, options: FitOptions):
df_gb = df[combined_col_selector.names].copy(deep=False)
agg_dict = {}
- agg_dict[cat_col_selector.names[0]] = ["count"]
+ base_aggs = []
+ if "size" in options.agg_list:
+ # This is either for a Categorify operation,
+ # or "size" is in the list of aggregations
+ base_aggs.append("size")
+ if set(options.agg_list).difference({"size", "min", "max"}):
+ # This is a groupby aggregation that may
+ # require "count" statistics
+ base_aggs.append("count")
+ agg_dict[cat_col_selector.names[0]] = base_aggs
if isinstance(options.agg_cols, list):
options.agg_cols = ColumnSelector(options.agg_cols)
for col in options.agg_cols.names:
@@ -807,13 +815,15 @@ def _mid_level_groupby(dfs, col_selector: ColumnSelector, freq_limit_val, option
gb.reset_index(drop=False, inplace=True)
name_count = _make_name(*(col_selector.names + ["count"]), sep=options.name_sep)
+ name_size = _make_name(*(col_selector.names + ["size"]), sep=options.name_sep)
if options.freq_limit and not options.max_size:
- gb = gb[gb[name_count] >= freq_limit_val]
+ gb = gb[gb[name_size] >= freq_limit_val]
required = col_selector.names.copy()
if "count" in options.agg_list:
required.append(name_count)
-
+ if "size" in options.agg_list:
+ required.append(name_size)
ddof = 1
if isinstance(options.agg_cols, list):
options.agg_cols = ColumnSelector(options.agg_cols)
@@ -959,10 +969,10 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
new_cols = {}
nulls_missing = False
for col in col_selector.names:
- name_count = col + "_count"
+ name_size = col + "_size"
null_size = 0
- if name_count in df:
- null_size = options.dataset_size - df[name_count].sum()
+ if name_size in df:
+ null_size = df[name_size].iloc[0]
if options.max_size:
max_emb_size = options.max_size
if isinstance(options.max_size, dict):
@@ -979,33 +989,32 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
raise ValueError("`nlargest` cannot be 0 or negative")
if nlargest < len(df):
- df = df.nlargest(n=nlargest, columns=name_count)
+ df = df.nlargest(n=nlargest, columns=name_size)
if not dispatch._series_has_nulls(df[col]):
- if name_count in df:
- df = df.sort_values(name_count, ascending=False, ignore_index=True)
+ if name_size in df:
+ df = df.sort_values(name_size, ascending=False, ignore_index=True)
nulls_missing = True
new_cols[col] = _concat(
[_nullable_series([None], df, df[col].dtype), df[col]],
ignore_index=True,
)
- if name_count in df:
- new_cols[name_count] = _concat(
- [_nullable_series([null_size], df, df[name_count].dtype), df[name_count]],
+ if name_size in df:
+ new_cols[name_size] = _concat(
+ [_nullable_series([null_size], df, df[name_size].dtype), df[name_size]],
ignore_index=True,
)
else:
# ensure None aka "unknown" stays at index 0
- if name_count in df:
+ if name_size in df:
df_0 = df.iloc[0:1]
- df_0[name_count] = null_size
- df_1 = df.iloc[1:].sort_values(name_count, ascending=False, ignore_index=True)
+ df_1 = df.iloc[1:].sort_values(name_size, ascending=False, ignore_index=True)
df = _concat([df_0, df_1])
new_cols[col] = df[col].copy(deep=False)
- if name_count in df:
- new_cols[name_count] = df[name_count].copy(deep=False)
+ if name_size in df:
+ new_cols[name_size] = df[name_size].copy(deep=False)
if nulls_missing:
df = type(df)(new_cols)
df.to_parquet(path, index=False, compression=None)
@@ -1028,8 +1037,10 @@ def _groupby_to_disk(ddf, write_func, options: FitOptions):
return {}
if options.concat_groups:
- if options.agg_list and options.agg_list != ["count"]:
- raise ValueError("Cannot use concat_groups=True with aggregations other than count")
+ if options.agg_list and not set(options.agg_list).issubset({"count", "size"}):
+ raise ValueError(
+ "Cannot use concat_groups=True with aggregations other than count and size"
+ )
if options.agg_cols:
raise ValueError("Cannot aggregate continuous-column stats with concat_groups=True")
@@ -1068,7 +1079,6 @@ def _groupby_to_disk(ddf, write_func, options: FitOptions):
level_2_name = "level_2-" + token
level_3_name = "level_3-" + token
finalize_labels_name = options.stat_name + "-" + token
- options.dataset_size = len(ddf)
for p in range(ddf.npartitions):
dsk[(level_1_name, p)] = (_top_level_groupby, (ddf._name, p), options)
k = 0
@@ -1120,7 +1130,7 @@ def _groupby_to_disk(ddf, write_func, options: FitOptions):
def _category_stats(ddf, options: FitOptions):
# Check if we only need categories
if options.agg_cols == [] and options.agg_list == []:
- options.agg_list = ["count"]
+ options.agg_list = ["size"]
return _groupby_to_disk(ddf, _write_uniques, options)
# Otherwise, getting category-statistics
| diff --git a/tests/unit/ops/test_categorify.py b/tests/unit/ops/test_categorify.py
--- a/tests/unit/ops/test_categorify.py
+++ b/tests/unit/ops/test_categorify.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import math
import os
import random
@@ -37,7 +36,7 @@
@pytest.mark.parametrize("cpu", _CPU)
@pytest.mark.parametrize("include_nulls", [True, False])
-def test_categorify_counts(tmpdir, cpu, include_nulls):
+def test_categorify_size(tmpdir, cpu, include_nulls):
num_rows = 50
num_distinct = 10
@@ -62,21 +61,21 @@ def test_categorify_counts(tmpdir, cpu, include_nulls):
if cpu:
expected = dict(zip(vals.index, vals))
computed = {
- session: count
- for session, count in zip(vocab["session_id"], vocab["session_id_count"])
- if count
+ session: size
+ for session, size in zip(vocab["session_id"], vocab["session_id_size"])
+ if size
}
else:
expected = dict(zip(vals.index.values_host, vals.values_host))
computed = {
- session: count
- for session, count in zip(
- vocab["session_id"].values_host, vocab["session_id_count"].values_host
+ session: size
+ for session, size in zip(
+ vocab["session_id"].values_host, vocab["session_id_size"].values_host
)
- if count
+ if size
}
first_key = list(computed.keys())[0]
- if math.isnan(first_key):
+ if pd.isna(first_key):
computed.pop(first_key)
assert computed == expected
@@ -104,8 +103,8 @@ def test_na_value_count(tmpdir):
second_cat = dispatch._read_dispatch("./categories/unique.productID.parquet")(
"./categories/unique.productID.parquet"
)
- assert single_cat["brand_count"][0] == 5
- assert second_cat["productID_count"][0] == 3
+ assert single_cat["brand_size"][0] == 5
+ assert second_cat["productID_size"][0] == 3
@pytest.mark.parametrize("freq_threshold", [0, 1, 2])
diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -346,6 +346,7 @@ def test_hugectr(
output_path=outdir,
out_files_per_proc=nfiles,
num_threads=num_io_threads,
+ write_hugectr_keyset=True,
)
# Check for _file_list.txt
| [BUG] Recent regressions in Criteo-benchmark performance
**Describe the bug**
It looks like a few PRs have introduced significant performance regressions in the DGX-1 dask-nvtabular-criteo-benchmark.py run time over the previous month. If I go back to the commit before #1136, I get a total run time of ~125s. However, on the latest main I am getting ~434s.
The biggest performance hit comes from the addition of a HugeCTR keyset output file write in #1136 (which is apparently very slow). However, #1162 also adds a new `len(ddf)` call to `Categorify`. For some reason, that new `len(ddf)` call was running relatively fast when it was first introduced, but it became considerably slower after #1194.
When I manually remove the keyset write and the `len(ddf)` call from `main`, the benchmark goes back down to ~125s.
**Steps/Code to reproduce bug**:
Running on a DGX-1 machine with 8x32GB-V100:
```
python NVTabular/bench/examples/dask-nvtabular-criteo-benchmark.py --part-mem-frac 0.125 --device-limit-frac 0.7 --device-pool-frac 0.8 --data-path /raid/.../criteo_parquet --out-path /raid/.../scratch/ --out-files-per-proc 3 --cat-cache-high device -d 0,1,2,3,4,5,6,7
```
**Output before #1136**:
```
Dask-NVTabular DLRM/Criteo benchmark
--------------------------------------
partition size | 4261216256
protocol | tcp
device(s) | 0,1,2,3,4,5,6,7
rmm-pool-frac | 0.8
out-files-per-proc | 3
num_io_threads | 0
shuffle | PER_PARTITION
cats-on-device | False
======================================
Runtime[s] | 125.49687790870667
======================================
```
**Output on `main`**:
```
Dask-NVTabular DLRM/Criteo benchmark
--------------------------------------
partition size | 4261216256
protocol | tcp
device(s) | 0,1,2,3,4,5,6,7
rmm-pool-frac | 0.8
out-files-per-proc | 3
num_io_threads | 0
shuffle | PER_PARTITION
cats-on-device | False
======================================
Runtime[s] | 434.0827877521515
======================================
```
**Environment details (please complete the following information):**
- Environment location: Bare-metal
- Method of NVTabular install: conda (cudf-21.08 conda package)
| 2021-10-29T22:20:49 |
|
NVIDIA-Merlin/NVTabular | 1,232 | NVIDIA-Merlin__NVTabular-1232 | [
"1230"
] | 21c0f7abca8467609f4f2203de3a95a83757d69e | diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -1345,14 +1345,15 @@ def _hash_bucket(df, num_buckets, col, encode_type="joint"):
def _copy_storage(existing_stats, existing_path, new_path, copy):
"""helper function to copy files to a new storage location"""
- from shutil import copyfile
-
+ existing_fs = get_fs_token_paths(existing_path)[0]
+ new_fs = get_fs_token_paths(new_path)[0]
new_locations = {}
for column, existing_file in existing_stats.items():
new_file = existing_file.replace(str(existing_path), str(new_path))
if copy and new_file != existing_file:
- os.makedirs(os.path.dirname(new_file), exist_ok=True)
- copyfile(existing_file, new_file)
+ new_fs.makedirs(os.path.dirname(new_file), exist_ok=True)
+ with new_fs.open(new_file, "wb") as output:
+ output.write(existing_fs.open(existing_file, "rb").read())
new_locations[column] = new_file
diff --git a/nvtabular/worker.py b/nvtabular/worker.py
--- a/nvtabular/worker.py
+++ b/nvtabular/worker.py
@@ -23,7 +23,6 @@
import cudf
except ImportError:
cudf = None
-import fsspec
import pyarrow as pa
from dask.distributed import get_worker
@@ -83,8 +82,7 @@ def fetch_table_data(
if reader == _lib.read_parquet: # pylint: disable=comparison-with-callable
# Using cudf-backed data with "host" caching.
# Cache as an Arrow table.
- with contextlib.closing(fsspec.open(path, "rb")) as f:
- table = reader(f, **use_kwargs)
+ table = reader(path, **use_kwargs)
if cudf:
table_cache[path] = table.to_arrow()
else:
diff --git a/nvtabular/workflow/workflow.py b/nvtabular/workflow/workflow.py
--- a/nvtabular/workflow/workflow.py
+++ b/nvtabular/workflow/workflow.py
@@ -15,13 +15,13 @@
#
import json
import logging
-import os
import sys
import time
import warnings
from typing import TYPE_CHECKING, Optional
import cloudpickle
+import fsspec
try:
import cudf
@@ -283,7 +283,9 @@ def save(self, path):
# avoid a circular import getting the version
from nvtabular import __version__ as nvt_version
- os.makedirs(path, exist_ok=True)
+ fs = fsspec.get_fs_token_paths(path)[0]
+
+ fs.makedirs(path, exist_ok=True)
# point all stat ops to store intermediate output (parquet etc) at the path
# this lets us easily bundle
@@ -292,7 +294,7 @@ def save(self, path):
# generate a file of all versions used to generate this bundle
lib = cudf if cudf else pd
- with open(os.path.join(path, "metadata.json"), "w") as o:
+ with fs.open(fs.sep.join([path, "metadata.json"]), "w") as o:
json.dump(
{
"versions": {
@@ -306,7 +308,7 @@ def save(self, path):
)
# dump out the full workflow (graph/stats/operators etc) using cloudpickle
- with open(os.path.join(path, "workflow.pkl"), "wb") as o:
+ with fs.open(fs.sep.join([path, "workflow.pkl"]), "wb") as o:
cloudpickle.dump(self, o)
@classmethod
@@ -327,8 +329,10 @@ def load(cls, path, client=None):
# avoid a circular import getting the version
from nvtabular import __version__ as nvt_version
+ fs = fsspec.get_fs_token_paths(path)[0]
+
# check version information from the metadata blob, and warn if we have a mismatch
- meta = json.load(open(os.path.join(path, "metadata.json")))
+ meta = json.load(fs.open(fs.sep.join([path, "metadata.json"])))
def parse_version(version):
return version.split(".")[:2]
@@ -354,7 +358,7 @@ def check_version(stored, current, name):
warnings.warn(f"Loading workflow generated on {expected}")
# load up the workflow object di
- workflow = cloudpickle.load(open(os.path.join(path, "workflow.pkl"), "rb"))
+ workflow = cloudpickle.load(fs.open(fs.sep.join([path, "workflow.pkl"]), "rb"))
workflow.client = client
# we might have been copied since saving, update all the stat ops
| diff --git a/tests/unit/test_s3.py b/tests/unit/test_s3.py
--- a/tests/unit/test_s3.py
+++ b/tests/unit/test_s3.py
@@ -85,7 +85,7 @@ def test_s3_dataset(s3_base, s3so, paths, datasets, engine, df, patch_aiobotocor
meta.write_metadata_file(files[fn])
files[fn].seek(0)
- with s3_context(s3_base=s3_base, bucket=engine, files=files):
+ with s3_context(s3_base=s3_base, bucket=engine, files=files) as s3fs:
# Create nvt.Dataset from mock s3 paths
url = f"s3://{engine}" if engine == "parquet" else f"s3://{engine}/*"
dataset = nvt.Dataset(url, engine=engine, storage_options=s3so)
@@ -108,3 +108,17 @@ def test_s3_dataset(s3_base, s3so, paths, datasets, engine, df, patch_aiobotocor
# make sure we can write out the dataset back to S3
# (https://github.com/NVIDIA-Merlin/NVTabular/issues/1214)
processor.transform(dataset).to_parquet(f"s3://{engine}/output")
+ expected = processor.transform(dataset).to_ddf().compute()
+
+ # make sure we can write out the workflow to s3
+ processor.save(f"s3://{engine}/saved_workflow/")
+
+ # make sure the workflow got saved to the right spot in S3
+ workflow_files = s3fs.ls(f"/{engine}/saved_workflow/")
+ assert workflow_files
+
+ # finally make sure we can read in the workflow from S3, and use it
+ # to transform values and get the same result as on the local fs
+ reloaded = nvt.Workflow.load(f"s3://{engine}/saved_workflow/")
+ from_s3 = reloaded.transform(dataset).to_ddf().compute()
+ assert_eq(expected, from_s3)
| [BUG] NVTabular: Support for GCS paths: read + write (Dataset, Workflow classes)
**Describe the bug**
Dataset: Only supports reading from GCS, not writing.
Workflow: Does not support GCS paths for reading and writing.
**Steps/Code to reproduce bug**
follow
https://github.com/jarokaz/merlin-on-vertex/blob/main/01-dataset-preprocessing.ipynb
Details will be added by Vertex team.
**Expected behavior**
as description
GCP - n1-highmem-32+4xT4 - nvcr.io/nvidia/merlin/merlin-training:21.09
Driver: NVIDIA-SMI 460.73.01 Driver Version: 460.73.01 CUDA Version: 11.2
| In the past (maybe 6 months ago), I have been able to run an NVTabular workflow reading/writing data from/to the cloud (GCS and S3) using this script: https://github.com/NVIDIA-Merlin/NVTabular/blob/main/bench/examples/dask-nvtabular-criteo-benchmark.py
I actually got some perf numbers and was working on how to better configure GCP/AWS VM storage to get better I/O access.
Not sure if this is exactly the same, but just adding the information.
>I have been able to run an NVTabular workflow reading/writing data from/to the cloud (GCS and S3) using this script: https://github.com/NVIDIA-Merlin/NVTabular/blob/main/bench/examples/dask-nvtabular-criteo-benchmark.py
@albert17 - Are you saying you were able to run your script with `--out-path` pointing to a location in both s3 and gcs? I have not spent much time trying to write to remote storage, but I do recall having problems writing to gcs. I believe I was able to write directly to s3, but needed to copy (`put`) local files to get them into gcs.
@rjzamora Yes, I meant that. I added a PR at that time to add the support to the script and to update several components from NVTabular. #540
@mengdong for writing the dataset, are you using the latest code? There was a fix pushed about a week ago that at least fixed this for writing the dataset to S3: https://github.com/NVIDIA-Merlin/NVTabular/issues/1214
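Since the patch above routes `Workflow.save`/`Workflow.load` (and the dataset writer) through fsspec, a round trip like the one below should work against any fsspec-backed store once the matching filesystem package (`gcsfs`, `s3fs`, ...) is installed. This is only a sketch — the bucket, paths, and columns are hypothetical:
```
import nvtabular as nvt
from nvtabular import ops

# hypothetical bucket/columns, shown only to illustrate the round trip
dataset = nvt.Dataset("gs://my-bucket/raw/*.parquet", engine="parquet")
features = ["x", "y"] >> ops.FillMissing() >> ops.Normalize()
workflow = nvt.Workflow(features)

workflow.fit(dataset)
workflow.transform(dataset).to_parquet("gs://my-bucket/processed")  # dataset write
workflow.save("gs://my-bucket/saved_workflow")                      # workflow save via fsspec
reloaded = nvt.Workflow.load("gs://my-bucket/saved_workflow")       # workflow load via fsspec
```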
| 2021-11-04T21:41:19 |
NVIDIA-Merlin/NVTabular | 1,239 | NVIDIA-Merlin__NVTabular-1239 | [
"1217"
] | 563c6c7570e81283ac220af1e3b8487264a3719c | diff --git a/nvtabular/io/fsspec_utils.py b/nvtabular/io/fsspec_utils.py
--- a/nvtabular/io/fsspec_utils.py
+++ b/nvtabular/io/fsspec_utils.py
@@ -109,30 +109,30 @@ def _optimized_read_remote(path, row_groups, columns, fs, **kwargs):
path, row_groups, columns, fs, **kwargs
)
- # Transfer the required byte-ranges with fsspec.
- # Store these blocks in a local dummy buffer
- dummy_buffer = _fsspec_data_transfer(
- path,
- fs,
- byte_ranges=byte_ranges,
- footer=footer,
- file_size=file_size,
- add_par1_magic=True,
- **kwargs,
- )
-
# Call cudf.read_parquet on the dummy buffer
strings_to_cats = kwargs.get("strings_to_categorical", False)
- df = cudf.read_parquet(
- io.BytesIO(dummy_buffer),
+ return cudf.read_parquet(
+ # Wrap in BytesIO since cudf will sometimes use
+ # pyarrow to parse the metadata (and pyarrow
+ # cannot read from a bytes object)
+ io.BytesIO(
+ # Transfer the required bytes with fsspec
+ _fsspec_data_transfer(
+ path,
+ fs,
+ byte_ranges=byte_ranges,
+ footer=footer,
+ file_size=file_size,
+ add_par1_magic=True,
+ **kwargs,
+ )
+ ),
engine="cudf",
columns=columns,
row_groups=row_groups,
strings_to_categorical=strings_to_cats,
**kwargs.get("read", {}),
)
- del dummy_buffer
- return df
def _get_parquet_byte_ranges(
| [BUG] excessive memory usage when reading parquet file from GCS bucket
**Describe the bug**
Cannot read 11GB parquet file from GCS with 4x 16GB T4 GPU, while same file reads just fine from local storage (persistent disk).
We suspect the parquet file is being read into the GPU as a whole; the expected behavior is to read only the required row groups rather than the entire file (see the sketch below).
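For reference, the intended access pattern is to transfer only the required byte ranges — i.e. selected row groups and columns — rather than the whole object. A minimal sketch of that pattern (the bucket, file, columns, and row-group index are hypothetical):
```
import cudf
import fsspec

# hypothetical GCS object; any fsspec URL behaves the same way
with fsspec.open("gs://my-bucket/large_file.parquet", "rb") as f:
    # only the requested row group and columns should be read and decoded
    gdf = cudf.read_parquet(f, columns=["I1", "C1"], row_groups=[0])
print(len(gdf))
```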
**Steps/Code to reproduce bug**
Wget the two Python scripts here: https://gist.github.com/mengdong/8526f2765707be492b1b697c3fb8c687 and run `python3 analyze_dataset_op.py`.
root@dongm-debug-fsspec:/scripts# python3 analyze_dataset_op.py
INFO:root:Creating Dask cluster.
INFO:numba.cuda.cudadrv.driver:init
distributed.preloading - INFO - Import preload module: dask_cuda.initialize
distributed.preloading - INFO - Import preload module: dask_cuda.initialize
distributed.preloading - INFO - Import preload module: dask_cuda.initialize
distributed.preloading - INFO - Import preload module: dask_cuda.initialize
INFO:root:Creating transformation workflow.
INFO:root:Creating dataset to be analysed.
distributed.worker - WARNING - Worker is at 82% memory usage. Pausing worker. Process memory: 24.44 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 24.44 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 24.51 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 24.65 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 24.68 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 24.85 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 25.03 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Worker is at 4% memory usage. Resuming worker. Process memory: 1.33 GiB -- Worker memory limit: 29.51 GiB
INFO:root:Starting workflow fitting for fsspec_parquet split.
distributed.worker - WARNING - Worker is at 85% memory usage. Pausing worker. Process memory: 25.30 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Worker is at 6% memory usage. Resuming worker. Process memory: 2.02 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - ERROR - [Errno 2] No such file or directory: '/scripts/dask-worker-space/storage/%28%27split-c6e5ab25962e13a1755fd762b62bbfe9%27%2C%205%2C%2011%2C%200%29'
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 2623, in release_key
if key in self.data:
File "/usr/lib/python3.8/_collections_abc.py", line 666, in __contains__
self[key]
File "/usr/local/lib/python3.8/dist-packages/dask_cuda/device_host_file.py", line 246, in __getitem__
return self.host_buffer[key]
File "/usr/local/lib/python3.8/dist-packages/zict-2.0.0-py3.8.egg/zict/buffer.py", line 78, in __getitem__
return self.slow_to_fast(key)
File "/usr/local/lib/python3.8/dist-packages/zict-2.0.0-py3.8.egg/zict/buffer.py", line 65, in slow_to_fast
value = self.slow[key]
File "/usr/local/lib/python3.8/dist-packages/zict-2.0.0-py3.8.egg/zict/func.py", line 38, in __getitem__
return self.load(self.d[key])
File "/usr/local/lib/python3.8/dist-packages/zict-2.0.0-py3.8.egg/zict/file.py", line 75, in __getitem__
with open(os.path.join(self.directory, _safe_key(key)), "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: '/scripts/dask-worker-space/storage/%28%27split-c6e5ab25962e13a1755fd762b62bbfe9%27%2C%205%2C%2011%2C%200%29'
distributed.worker - ERROR -
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1857, in transition_flight_memory
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/batched.py", line 136, in send
raise CommClosedError()
distributed.comm.core.CommClosedError
tornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOLoop object at 0x7ff429eb9b20>>, <Task finished name='Task-1992' coro=<Worker.gather_dep() done, defined at /usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py:2267> exception=CommClosedError()>)
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 741, in _run_callback
ret = callback()
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 765, in _discard_future_result
future.result()
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 2411, in gather_dep
self.transition(ts, "memory", value=data[d])
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1694, in transition
state = func(ts, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1857, in transition_flight_memory
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/batched.py", line 136, in send
raise CommClosedError()
distributed.comm.core.CommClosedError
tornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOLoop object at 0x7ff429eb9b20>>, <Task finished name='Task-1995' coro=<Worker.heartbeat() done, defined at /usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py:959> exception=RuntimeError('Set changed size during iteration')>)
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 741, in _run_callback
ret = callback()
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 765, in _discard_future_result
future.result()
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 991, in heartbeat
await self._register_with_scheduler()
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 905, in _register_with_scheduler
types={k: typename(v) for k, v in self.data.items()},
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 905, in <dictcomp>
types={k: typename(v) for k, v in self.data.items()},
File "/usr/lib/python3.8/_collections_abc.py", line 743, in __iter__
for key in self._mapping:
RuntimeError: Set changed size during iteration
distributed.worker - WARNING - Worker is at 87% memory usage. Pausing worker. Process memory: 25.84 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 25.84 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker.html#memtrim for more information. -- Unmanaged memory: 25.90 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - WARNING - Worker is at 7% memory usage. Resuming worker. Process memory: 2.07 GiB -- Worker memory limit: 29.51 GiB
distributed.worker - ERROR -
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1857, in transition_flight_memory
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/batched.py", line 136, in send
raise CommClosedError()
distributed.comm.core.CommClosedError
tornado.application - ERROR - Exception in callback functools.partial(<bound method IOLoop._discard_future_result of <tornado.platform.asyncio.AsyncIOLoop object at 0x7ff429eb9b20>>, <Task finished name='Task-2007' coro=<Worker.gather_dep() done, defined at /usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py:2267> exception=CommClosedError()>)
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 741, in _run_callback
ret = callback()
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 765, in _discard_future_result
future.result()
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 2411, in gather_dep
self.transition(ts, "memory", value=data[d])
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1694, in transition
state = func(ts, **kwargs)
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/worker.py", line 1857, in transition_flight_memory
self.batched_stream.send({"op": "add-keys", "keys": [ts.key]})
File "/usr/local/lib/python3.8/dist-packages/distributed-2021.7.1-py3.8.egg/distributed/batched.py", line 136, in send
raise CommClosedError()
distributed.comm.core.CommClosedError
**Expected behavior**
Should be able to process the 11 GB parquet file with 4 T4 GPUs.
GCP - n1-standard-32+4xT4 - nvcr.io/nvidia/merlin/merlin-training:vnightly
Driver: NVIDIA-SMI 460.73.01 Driver Version: 460.73.01 CUDA Version: 11.2
| 2021-11-12T05:32:30 |
||
NVIDIA-Merlin/NVTabular | 1,262 | NVIDIA-Merlin__NVTabular-1262 | [
"1261"
] | 0f017da48840008662324d2ec535e2ea15fd6edb | diff --git a/nvtabular/ops/list_slice.py b/nvtabular/ops/list_slice.py
--- a/nvtabular/ops/list_slice.py
+++ b/nvtabular/ops/list_slice.py
@@ -44,10 +44,12 @@ class ListSlice(Operator):
truncated = column_names >> ops.ListSlice(-10)
"""
- def __init__(self, start, end=None):
+ def __init__(self, start, end=None, pad=False, pad_value=0.0):
super().__init__()
self.start = start
self.end = end
+ self.pad = pad
+ self.pad_value = pad_value
if self.start > 0 and self.end is None:
self.end = self.start
@@ -60,27 +62,47 @@ def __init__(self, start, end=None):
def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
on_cpu = _is_cpu_object(df)
ret = type(df)()
+
+ max_elements = self.end - self.start
+
for col in col_selector.names:
# handle CPU via normal python slicing (not very efficient)
if on_cpu:
- ret[col] = [row[self.start : self.end] for row in df[col]]
+ values = [row[self.start : self.end] for row in df[col]]
+
+ # pad out to so each row has max_elements if askeed
+ if self.pad:
+ for v in values:
+ if len(v) < max_elements:
+ v.extend([self.pad_value] * (max_elements - len(v)))
+
+ ret[col] = values
else:
# figure out the size of each row from the list offsets
c = df[col]._column
offsets = c.offsets.values
elements = c.elements.values
- # figure out the size of each row after slicing start/end
- new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
threads = 32
blocks = (offsets.size + threads - 1) // threads
- # calculate new row offsets after slicing
- _calculate_row_sizes[blocks, threads](self.start, self.end, offsets, new_offsets)
- new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
+ if self.pad:
+ new_offsets = cp.arange(offsets.size, dtype=offsets.dtype) * max_elements
+
+ else:
+ # figure out the size of each row after slicing start/end
+ new_offsets = cp.zeros(offsets.size, dtype=offsets.dtype)
+
+ # calculate new row offsets after slicing
+ _calculate_row_sizes[blocks, threads](
+ self.start, self.end, offsets, new_offsets
+ )
+ new_offsets = cp.cumsum(new_offsets).astype(offsets.dtype)
# create a new array for the sliced elements
- new_elements = cp.zeros(new_offsets[-1].item(), dtype=elements.dtype)
+ new_elements = cp.full(
+ new_offsets[-1].item(), fill_value=self.pad_value, dtype=elements.dtype
+ )
if new_elements.size:
_slice_rows[blocks, threads](
self.start, offsets, elements, new_offsets, new_elements
@@ -132,6 +154,15 @@ def _slice_rows(start, offsets, elements, new_offsets, new_elements):
if offset < offsets[rowid]:
offset = offsets[rowid]
- for new_offset in range(new_offsets[rowid], new_offsets[rowid + 1]):
+ new_start = new_offsets[rowid]
+ new_end = new_offsets[rowid + 1]
+
+ # if we are padding (more new offsets than old olffsets) - don't keep on iterating past
+ # the end
+ offset_delta = (new_end - new_start) - (offsets[rowid + 1] - offset)
+ if offset_delta > 0:
+ new_end -= offset_delta
+
+ for new_offset in range(new_start, new_end):
new_elements[new_offset] = elements[offset]
offset += 1
| diff --git a/tests/unit/ops/test_ops.py b/tests/unit/ops/test_ops.py
--- a/tests/unit/ops/test_ops.py
+++ b/tests/unit/ops/test_ops.py
@@ -401,6 +401,26 @@ def test_list_slice(cpu):
assert_eq(transformed, expected)
[email protected]("cpu", _CPU)
+def test_list_slice_pad(cpu):
+ DataFrame = pd.DataFrame if cpu else cudf.DataFrame
+ df = DataFrame({"y": [[0, 1, 2, 2, 767], [1, 2, 2, 3], [1, 223, 4]]})
+
+ # 0 pad to 5 elements
+ op = ops.ListSlice(5, pad=True)
+ selector = ColumnSelector(["y"])
+ transformed = op.transform(selector, df)
+ expected = DataFrame({"y": [[0, 1, 2, 2, 767], [1, 2, 2, 3, 0], [1, 223, 4, 0, 0]]})
+ assert_eq(transformed, expected)
+
+ # make sure we can also pad when start != 0, and when pad_value is set
+ op = ops.ListSlice(1, 6, pad=True, pad_value=123)
+ selector = ColumnSelector(["y"])
+ transformed = op.transform(selector, df)
+ expected = DataFrame({"y": [[1, 2, 2, 767, 123], [2, 2, 3, 123, 123], [223, 4, 123, 123, 123]]})
+ assert_eq(transformed, expected)
+
+
@pytest.mark.parametrize("cpu", _CPU)
def test_rename(cpu):
DataFrame = pd.DataFrame if cpu else cudf.DataFrame
| [FEA] Create a padding option in the ListSlice Op
A right-padding option is required (a usage sketch follows this list):
- to create fixed-length lists from the ETL workflow (see the [example nb](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/getting-started-session-based/01-ETL-with-NVTabular.ipynb)), and
- to be able to serve a seq-to-seq model (e.g., a session-based model) to TIS with properly reshaped input tensors and get properly reshaped outputs.
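A usage sketch of the requested behaviour, mirroring the unit test that ships with this PR (the toy dataframe comes from that test; the top-level `ColumnSelector` import assumes a recent NVTabular):
```
import pandas as pd
import nvtabular as nvt
from nvtabular import ops

df = pd.DataFrame({"y": [[0, 1, 2, 2, 767], [1, 2, 2, 3], [1, 223, 4]]})

# keep (at most) the first 5 items and right-pad shorter rows with 0
sliced = ops.ListSlice(5, pad=True, pad_value=0).transform(nvt.ColumnSelector(["y"]), df)
print(sliced["y"].tolist())
# [[0, 1, 2, 2, 767], [1, 2, 2, 3, 0], [1, 223, 4, 0, 0]]
```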
| 2021-11-19T22:02:20 |
|
NVIDIA-Merlin/NVTabular | 1,270 | NVIDIA-Merlin__NVTabular-1270 | [
"1267"
] | 646ead8a9d5bceb49303049c5a03e5b9eaf5d880 | diff --git a/nvtabular/inference/triton/ensemble.py b/nvtabular/inference/triton/ensemble.py
--- a/nvtabular/inference/triton/ensemble.py
+++ b/nvtabular/inference/triton/ensemble.py
@@ -786,9 +786,6 @@ def _convert_pytorch_dtype(dtype):
torch.int32: model_config.TYPE_INT32,
torch.int16: model_config.TYPE_INT16,
torch.int8: model_config.TYPE_INT8,
- torch.uint64: model_config.TYPE_UINT64,
- torch.uint32: model_config.TYPE_UINT32,
- torch.uint16: model_config.TYPE_UINT16,
torch.uint8: model_config.TYPE_UINT8,
torch.bool: model_config.TYPE_BOOL,
}
@@ -819,9 +816,6 @@ def _convert_string2pytorch_dtype(dtype):
"TYPE_INT32": torch.int32,
"TYPE_INT16": torch.int16,
"TYPE_INT8": torch.int8,
- "TYPE_UINT64": torch.uint64,
- "TYPE_UINT32": torch.uint32,
- "TYPE_UINT16": torch.uint16,
"TYPE_UINT8": torch.uint8,
"TYPE_BOOL": torch.bool,
}
| [BUG] getting error from export_pytorch_ensemble()
**Describe the bug**
Getting the following error while running `export_pytorch_ensemble()` in cell 26 in this [notebook](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/end-to-end-session-based/end-to-end-session-based-with-Yoochoose.ipynb).
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/tmp/ipykernel_783/617147693.py in <module>
1 from nvtabular.inference.triton import export_pytorch_ensemble
----> 2 export_pytorch_ensemble(
3 model,
4 workflow,
5 sparse_max=recsys_trainer.get_train_dataloader().dataset.sparse_max,
/nvtabular/nvtabular/inference/triton/ensemble.py in export_pytorch_ensemble(model, workflow, sparse_max, name, model_path, label_columns, use_fix_dtypes, version, nvtabular_backend)
139 # generate the TF saved model
140 pt_path = os.path.join(model_path, name + "_pt")
--> 141 pt_config = export_pytorch_model(
142 model, workflow, sparse_max, name + "_pt", pt_path, use_fix_dtypes, version=version
143 )
/nvtabular/nvtabular/inference/triton/ensemble.py in export_pytorch_model(model, workflow, sparse_max, name, output_path, use_fix_dtypes, version, backend)
570 config.output.append(
571 model_config.ModelOutput(
--> 572 name="output", data_type=_convert_pytorch_dtype(dtype), dims=[-1, dims]
573 )
574 )
/nvtabular/nvtabular/inference/triton/ensemble.py in _convert_pytorch_dtype(dtype)
787 torch.int16: model_config.TYPE_INT16,
788 torch.int8: model_config.TYPE_INT8,
--> 789 torch.uint64: model_config.TYPE_UINT64,
790 torch.uint32: model_config.TYPE_UINT32,
791 torch.uint16: model_config.TYPE_UINT16,
AttributeError: module 'torch' has no attribute 'uint64'
```
Looks like we need to update the `_convert_pytorch_dtype(dtype)` function. Per [the PyTorch dtype table](https://pytorch.org/docs/stable/tensors.html), `uint64` (and the other wide unsigned types) does not exist as a `torch` dtype attribute.
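A quick sanity check (just a demonstration against the PyTorch 1.10 build listed below, not part of the fix):
```
import torch

# PyTorch 1.10 only exposes uint8 among the unsigned integer dtypes,
# so the uint16/uint32/uint64 entries have to be dropped from the mapping.
for name in ("uint8", "uint16", "uint32", "uint64"):
    print(name, hasattr(torch, name))
# uint8 True, uint16 False, uint32 False, uint64 False
```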
Container: `merlin-pytorch-training:21.11` pulled the latest main branch
Pytorch: `1.10.10` public pytorch
TF4rec: v0.1.2
| 2021-11-23T15:42:52 |
||
NVIDIA-Merlin/NVTabular | 1,301 | NVIDIA-Merlin__NVTabular-1301 | [
"1173"
] | 76b0b5eb3cd45a6b1d17bf29a8968252bea011fe | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -861,7 +861,14 @@ def to_parquet(
fns = output_files[i : i + files_per_task]
start = i * split
stop = min(start + split * len(fns), ddf.npartitions)
- new[tuple(fns)] = np.arange(start, stop)
+ if start < stop:
+ new[tuple(fns)] = np.arange(start, stop)
+ # let user know they will not have expected number of output files.
+ if len(new.keys()) < len(output_files):
+ warnings.warn(
+ f"Only created {len(new.keys())} files did not have enough\n"
+ f"partitions to create {len(output_files)} files."
+ )
output_files = new
suffix = "" # Don't add a suffix later - Names already include it
if not isinstance(output_files, dict):
| diff --git a/tests/unit/test_io.py b/tests/unit/test_io.py
--- a/tests/unit/test_io.py
+++ b/tests/unit/test_io.py
@@ -158,6 +158,36 @@ def test_dask_dataset_itr(tmpdir, datasets, engine, gpu_memory_frac):
assert len(my_iter) == size
+def test_io_partitions_push(tmpdir):
+ os.makedirs(os.path.join(tmpdir, "csv"))
+
+ # Generate random csv files
+ files = [os.path.join(tmpdir, f"csv/day_{i}") for i in range(23)]
+ for file in files:
+ with open(file, "w") as f:
+ f.write("0,1,2,3,a,b,c\n" * 1000)
+
+ # Load csv files
+ label_columns = ["label"]
+ cont_columns = ["I1", "I2", "I3"]
+ cat_columns = ["C1", "C2", "C3"]
+ columns = label_columns + cont_columns + cat_columns
+ dataset = nvt.Dataset(files, engine="csv", names=columns)
+ print("npartitions of dataset:", dataset.npartitions)
+
+ for x in range(20):
+ dataset.to_parquet(
+ output_files=x,
+ output_path=os.path.join(tmpdir, f"parquet{x}"),
+ cats=cat_columns,
+ conts=cont_columns,
+ labels=label_columns,
+ )
+
+ df_lib = dispatch.get_lib()
+ df_lib.read_parquet(os.path.join(tmpdir, f"parquet{x}/part_0.parquet"))
+
+
@pytest.mark.parametrize("engine", ["csv", "parquet", "csv-no-header"])
@pytest.mark.parametrize("num_files", [1, 2])
@pytest.mark.parametrize("cpu", [None, True])
| [BUG]Criteo Preprocessing example failed with increase of number of parquet files.
**Describe the bug**
observe Exception: KeyError('C1') when using 8 A100 each output 8 files, however, same job succeed when using 8A100 each output 4 files
**Steps/Code to reproduce bug**
Follow https://github.com/jarokaz/merlin-on-vertex/blob/main/src/training/dataprep/README.md
docker run -it --rm --gpus all \
-v /home/dongm/merlin-on-vertex/src/training/dataprep:/src \
-w /src \
-v /mnt/disks/data:/criteo_data \
nvcr.io/nvidia/merlin/merlin-training:21.09 \
python preprocess.py \
--train_folder /criteo_data/criteo_raw_parquet_train \
--valid_folder /criteo_data/criteo_raw_parquet_valid \
--output_folder /criteo_data/criteo_processed_parquet \
--devices 0,1,2,3,4,5,6,7 \
--protocol tcp \
--device_limit_frac 0.8 \
--device_pool_frac 0.9 \
--num_io_threads 8 \
--part_mem_frac 0.08 \
--out_files_per_proc 8 \
--freq_limit 6 \
--shuffle PER_PARTITION
Will fail
```
distributed.worker - WARNING - Compute Failed
Function: _write_subgraph
args: (<nvtabular.io.dask.DaskSubgraph object at 0x7ef650f80340>, ('part_56.parquet', 'part_57.parquet', 'part_58.parquet', 'part_59.parquet', 'part_60.parquet', 'part_61.parquet', 'part_62.parquet', 'part_63.parquet'), '/criteo_data/criteo_processed_parquet/train', <Shuffle.PER_PARTITION: 0>, <fsspec.implementations.local.LocalFileSystem object at 0x7f015df3c790>, ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], ['label'], 'parquet', 8, False, '')
kwargs: {}
Exception: KeyError('C1')
Traceback (most recent call last):
File "preprocess.py", line 284, in <module>
preprocess(args)
File "preprocess.py", line 135, in preprocess
workflow.transform(train_dataset).to_parquet(
File "/nvtabular/nvtabular/io/dataset.py", line 886, in to_parquet
_ddf_to_dataset(
File "/nvtabular/nvtabular/io/dask.py", line 369, in _ddf_to_dataset
out = client.compute(out).result()
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/client.py", line 228, in result
raise exc.with_traceback(tb)
File "/usr/lib/python3.8/contextlib.py", line 75, in inner
return func(*args, **kwds)
File "/nvtabular/nvtabular/io/dask.py", line 210, in _write_subgraph
return writer.close()
File "/nvtabular/nvtabular/io/writer.py", line 314, in close
_general_meta = self.package_general_metadata()
File "/nvtabular/nvtabular/io/writer.py", line 253, in package_general_metadata
data["cats"].append({"col_name": c, "index": self.col_idx[c]})
KeyError: 'C1'
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.nanny - WARNING - Worker process still alive after 3 seconds, killing
distributed.core - ERROR - Exception while handling op register-client
Traceback (most recent call last):
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/core.py", line 502, in handle_comm
result = await result
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5194, in add_client
self.remove_client(client=client)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5221, in remove_client
self.client_releases_keys(
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 4961, in client_releases_keys
self.transitions(recommendations)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 6975, in transitions
self.send_all(client_msgs, worker_msgs)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5478, in send_all
w = stream_comms[worker]
KeyError: 'tcp://127.0.0.1:36065'
tornado.application - ERROR - Exception in callback functools.partial(<function TCPServer._handle_connection.<locals>.<lambda> at 0x7f8c74443820>, <Task finished name='Task-72' coro=<BaseTCPListener._handle_stream() done, defined at /root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/comm/tcp.py:478> exception=KeyError('tcp://127.0.0.1:36065')>)
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/tornado/ioloop.py", line 741, in _run_callback
ret = callback()
File "/usr/local/lib/python3.8/dist-packages/tornado/tcpserver.py", line 331, in <lambda>
gen.convert_yielded(future), lambda f: f.result()
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/comm/tcp.py", line 495, in _handle_stream
await self.comm_handler(comm)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/core.py", line 502, in handle_comm
result = await result
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5194, in add_client
self.remove_client(client=client)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5221, in remove_client
self.client_releases_keys(
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 4961, in client_releases_keys
self.transitions(recommendations)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 6975, in transitions
self.send_all(client_msgs, worker_msgs)
File "/root/.local/lib/python3.8/site-packages/distributed-2021.7.1-py3.8.egg/distributed/scheduler.py", line 5478, in send_all
w = stream_comms[worker]
KeyError: 'tcp://127.0.0.1:36065'
Exception in thread AsyncProcess Dask Worker process (from Nanny) watch process join:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
```
docker run -it --rm --gpus all \
-v /home/dongm/merlin-on-vertex/src/training/dataprep:/src \
-w /src \
-v /mnt/disks/data:/criteo_data \
nvcr.io/nvidia/merlin/merlin-training:21.09 \
python preprocess.py \
--train_folder /criteo_data/criteo_raw_parquet_train \
--valid_folder /criteo_data/criteo_raw_parquet_valid \
--output_folder /criteo_data/criteo_processed_parquet \
--devices 0,1,2,3,4,5,6,7 \
--protocol tcp \
--device_limit_frac 0.8 \
--device_pool_frac 0.9 \
--num_io_threads 8 \
--part_mem_frac 0.08 \
--out_files_per_proc 4 \
--freq_limit 6 \
--shuffle PER_PARTITION
Will work
**Expected behavior**
The workflow transform should work when there are missing keys in certain partitions.
**Environment details (please complete the following information):**
GCP A100 a2-high-8g, | NVIDIA-SMI 470.57.02 Driver Version: 470.57.02 CUDA Version: 11.4
nvcr.io/nvidia/merlin/merlin-training:21.09
Aha! Link: https://nvaiinfra.aha.io/features/MERLIN-505
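For what it's worth, a hedged sketch of the condition the fix guards against, loosely following the new `test_io_partitions_push` unit test (the generated files and column names are made up):
```
import nvtabular as nvt

# generate a couple of tiny CSV partitions, like the new unit test does
for i in range(2):
    with open(f"day_{i}.csv", "w") as f:
        f.write("0,1,2,3,a,b,c\n" * 1000)

columns = ["label", "I1", "I2", "I3", "C1", "C2", "C3"]
dataset = nvt.Dataset([f"day_{i}.csv" for i in range(2)], engine="csv", names=columns)

# Requesting more output files than there are partitions is the situation that
# previously surfaced downstream as KeyError('C1'); with the fix it warns and
# simply writes fewer files.
dataset.to_parquet(
    output_path="./criteo_processed",
    output_files=16,
    cats=["C1", "C2", "C3"],
    conts=["I1", "I2", "I3"],
    labels=["label"],
)
```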
| @mengdong how are you generating these parquet files? Can you generate a consistent set of columns in each parquet file?
@rjzamora any ideas on how complicated this would be to implement?
If we look at the code indicated by the error: https://github.com/jarokaz/merlin-on-vertex/blob/main/src/training/dataprep/preprocess.py#L135-L143
we will see that we use nvt.Workflow(features, client=client) to create the workflow, in which we pass only one set of features, which, when distributed to the Dask workers, should ideally let each worker know the expected columns:
```
features, dict_dtypes = create_preprocessing_workflow(
categorical_columns=CATEGORICAL_COLUMNS,
continuous_columns=CONTINUOUS_COLUMNS,
label_columns=LABEL_COLUMNS,
freq_limit=args.freq_limit,
stats_path=stats_output_folder)
```
Not sure why this error would occur at all.
@benfred did you have a chance to review @mengdong 's comment?
@mengdong is this the criteo dataset gotten by running our download/convert notebook https://github.com/NVIDIA-Merlin/NVTabular/blob/main/examples/scaling-criteo/01-Download-Convert.ipynb ? Or was this parquet dataset generated by another script like https://github.com/jarokaz/merlin-on-vertex/blob/main/src/training/dataprep/convert_to_parquet.py ?
@jperez999 @rjzamora can either of you reproduce this issue?
it is https://github.com/jarokaz/merlin-on-vertex/blob/main/src/training/dataprep/convert_to_parquet.py.
@mengdong can you link that script for conversion again, seems the links above are now reporting a 404.
Hi @jperez999,
We are using this code to convert to Parquet:
https://github.com/jarokaz/merlin-on-vertex/blob/7bea3758b8ca1166c8a45bd547080013b4078c9a/src/preprocessing/etl.py#L79
Please let me know if you have any question.
Thanks | 2021-12-07T00:39:09 |
NVIDIA-Merlin/NVTabular | 1,312 | NVIDIA-Merlin__NVTabular-1312 | [
"1306"
] | e53f065b040a9db53effa918657891a1dc98ea46 | diff --git a/nvtabular/inference/triton/__init__.py b/nvtabular/inference/triton/__init__.py
--- a/nvtabular/inference/triton/__init__.py
+++ b/nvtabular/inference/triton/__init__.py
@@ -25,6 +25,7 @@
from nvtabular.dispatch import _is_list_dtype, _is_string_dtype, _make_df # noqa
from nvtabular.inference.triton.ensemble import ( # noqa
+ _convert_string2pytorch_dtype,
export_hugectr_ensemble,
export_pytorch_ensemble,
export_tensorflow_ensemble,
| [BUG] Getting error when loading the TF4Rec PyTorch model to the TIS
**Describe the bug**
I am getting the following error when I load a trained TF4Rec PyTorch to TIS:
```
| t4r_pytorch_pt | 1 | UNAVAILABLE: Internal: ImportError: cannot import name '_convert_string2pytorch_dty |
| | | pe' from 'nvtabular.inference.triton' (/nvtabular/nvtabular/inference/triton/__init |
| | | __.py) |
| | | |
| | | At: |
| | | /workspace/models/t4r_pytorch_pt/1/model.py(42): <module> |
| | | <frozen importlib._bootstrap>(219): _call_with_frames_removed |
| | | <frozen importlib._bootstrap_external>(848): exec_module |
| | | <frozen importlib._bootstrap>(686): _load_unlocked |
| | | <frozen importlib._bootstrap>(975): _find_and_load_unlocked |
| | | <frozen importlib._bootstrap>(991): _find_and_load |
+-----------------+---------+---------------------------------------------------------
```
**Steps/Code to reproduce bug**
Run the 02 and 03 notebooks Transformers4Rec tutorial [notebooks](https://github.com/NVIDIA-Merlin/Transformers4Rec/tree/main/examples/tutorial) to train the model. Then serve the model to TIS based on the instructions given on the [inference notebook](https://github.com/NVIDIA-Merlin/Transformers4Rec/blob/main/examples/tutorial/04-Inference-with-Triton.ipynb).
`Oct-2019.parquet` Dataset can be downloaded from here: https://drive.google.com/drive/u/0/folders/1nTuG6UHWOEaZnBJj7YSIVvnphE1zGc1h
**Expected behavior**
Model should be loaded to the TIS without issue.
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)] : Docker
- Method of NVTabular install: [conda, Docker, or from source]: Docker `merlin-inference:21.11` and `merlin-pytorch-training:21.11`
Please do `git pull origin main` && `pip install -e .` to pull the latest main branch.
- If method of install is [Docker], provide `docker pull` & `docker run` commands used
This issue was also submitted by a user on TF4Rec GH repo- https://github.com/NVIDIA-Merlin/Transformers4Rec/issues/339
| 2021-12-10T16:35:56 |
||
NVIDIA-Merlin/NVTabular | 1,357 | NVIDIA-Merlin__NVTabular-1357 | [
"1337"
] | 498328a05048af2f7892988855ebc61bf2427c4b | diff --git a/nvtabular/graph/schema_io/schema_writer_pbtxt.py b/nvtabular/graph/schema_io/schema_writer_pbtxt.py
--- a/nvtabular/graph/schema_io/schema_writer_pbtxt.py
+++ b/nvtabular/graph/schema_io/schema_writer_pbtxt.py
@@ -16,6 +16,7 @@
import os
from pathlib import Path
+import fsspec
import numpy
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
@@ -40,9 +41,7 @@ def _read(cls, schema_path):
@classmethod
def write(cls, schema, schema_path):
- schema_path = Path(schema_path)
- if not schema_path.is_dir():
- raise ValueError(f"The path provided is not a valid directory: {schema_path}")
+ fs = fsspec.get_fs_token_paths(schema_path)[0]
# traverse list of column schema
schema_file = schema_pb2.Schema()
@@ -51,9 +50,16 @@ def write(cls, schema, schema_path):
features.append(create_protobuf_feature(col_schema))
schema_file.feature.extend(features)
- with open(schema_path / "schema.pbtxt", "w") as f:
- f.write(text_format.MessageToString(schema_file))
- return schema
+ try:
+ with fs.open(fs.sep.join([str(schema_path), "schema.pbtxt"]), "w") as f:
+ f.write(text_format.MessageToString(schema_file))
+ return schema
+ except Exception as e:
+ if not fs.isdir(schema_path):
+ raise ValueError(
+ f"The path provided is not a valid directory: {schema_path}"
+ ) from e
+ raise
@classmethod
def load(cls, schema_path):
| [BUG] Test_s3 failing
```
with s3_context(s3_base=s3_base, bucket=engine, files=files) as s3fs:
# Create nvt.Dataset from mock s3 paths
url = f"s3://{engine}" if engine == "parquet" else f"s3://{engine}/*"
dataset = nvt.Dataset(url, engine=engine, storage_options=s3so)
# Check that the iteration API works
columns = mycols_pq if engine == "parquet" else mycols_csv
gdf = nvt.dispatch._concat(list(dataset.to_iter()))[columns]
assert_eq(gdf.reset_index(drop=True), df.reset_index(drop=True))
cat_names = ["name-cat", "name-string"] if engine == "parquet" else ["name-string"]
cont_names = ["x", "y", "id"]
label_name = ["label"]
conts = cont_names >> ops.FillMissing() >> ops.Clip(min_value=0) >> ops.LogOp()
cats = cat_names >> ops.Categorify(cat_cache="host")
processor = nvt.Workflow(conts + cats + label_name)
processor.fit(dataset)
# make sure we can write out the dataset back to S3
# (https://github.com/NVIDIA-Merlin/NVTabular/issues/1214)
> processor.transform(dataset).to_parquet(f"s3://{engine}/output")
/nvtabular/tests/unit/test_s3.py:111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/nvtabular/nvtabular/io/dataset.py:906: in to_parquet
self.schema.write(output_path)
/nvtabular/nvtabular/graph/schema.py:154: in write
return PbTxt_SchemaWriter.write(self, schema_path)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
cls = <class 'nvtabular.graph.schema_io.schema_writer_pbtxt.PbTxt_SchemaWriter'>
schema = [{'name': 'x', 'tags': [<Tags.CONTINUOUS: 'continuous'>], 'properties': {}, 'dtype': <class 'float'>, '_is_list': Fals...int'>, '_is_list': False}, {'name': 'label', 'tags': [], 'properties': {}, 'dtype': dtype('int64'), '_is_list': False}]
schema_path = PosixPath('s3:/csv/output')
@classmethod
def write(cls, schema, schema_path):
schema_path = Path(schema_path)
if not schema_path.is_dir():
> raise ValueError(f"The path provided is not a valid directory: {schema_path}")
E ValueError: The path provided is not a valid directory: s3:/csv/output
/nvtabular/nvtabular/graph/schema_io/schema_writer_pbtxt.py:45: ValueError
```
| Regression from here: https://github.com/NVIDIA-Merlin/NVTabular/pull/1218 | 2022-01-11T22:59:17 |
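A minimal sketch of the fsspec-based write path that replaces the `Path.is_dir()` check (the bucket is hypothetical and credentials are assumed to be configured; a local directory path works the same way):
```
import fsspec

schema_path = "s3://my-bucket/output"            # hypothetical output location
fs = fsspec.get_fs_token_paths(schema_path)[0]   # filesystem matching the URL
with fs.open(fs.sep.join([schema_path, "schema.pbtxt"]), "w") as f:
    f.write("feature { name: 'x' }")             # toy payload for illustration
```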
|
NVIDIA-Merlin/NVTabular | 1,380 | NVIDIA-Merlin__NVTabular-1380 | [
"1249"
] | fed81e2f0ca4963fb1a7615c4c1fdb372cd5b89b | diff --git a/nvtabular/__init__.py b/nvtabular/__init__.py
--- a/nvtabular/__init__.py
+++ b/nvtabular/__init__.py
@@ -18,7 +18,7 @@
from merlin.graph import ColumnSelector
from merlin.schema import ColumnSchema, Schema
-from . import graph, io, workflow # noqa
+from . import graph, io, utils, workflow # noqa
from ._version import get_versions
# suppress some warnings with cudf warning about column ordering with dlpack
@@ -30,6 +30,8 @@
WorkflowNode = workflow.WorkflowNode
Workflow = workflow.Workflow
Dataset = io.dataset.Dataset
+Distributed = utils.Distributed
+Serial = utils.Serial
# Provides an alias of ColumnSelector so that old usages of ColumnGroup to
@@ -47,6 +49,8 @@ def ColumnGroup(columns):
"ColumnSelector",
"ColumnSchema",
"Schema",
+ "Distributed",
+ "Serial",
]
# cudf warns about column ordering with dlpack methods, ignore it
| diff --git a/tests/unit/test_dask_nvt.py b/tests/unit/test_dask_nvt.py
--- a/tests/unit/test_dask_nvt.py
+++ b/tests/unit/test_dask_nvt.py
@@ -26,7 +26,7 @@
from nvtabular import ColumnSelector, Dataset, Workflow, ops
from nvtabular.io import Shuffle
-from nvtabular.utils import set_dask_client
+from nvtabular.utils import global_dask_client, set_dask_client
from tests.conftest import allcols_csv, mycols_csv, mycols_pq
cudf = pytest.importorskip("cudf")
@@ -295,3 +295,21 @@ def test_filtered_partition(tmpdir, cpu):
# Write result to disk
workflow.transform(dataset).to_parquet(str(tmpdir))
+
+
+def test_merlin_core_execution_managers(client):
+ # This functionality lives in merlin-core,
+ # but we are testing that we can import
+ # from NVTabular with proper behavior.
+ from nvtabular import Distributed, Serial
+
+ # Set distributed client
+ with Distributed(client=client):
+ assert global_dask_client() == client
+
+ # Check that the global dask client
+ # becomes None in a `with Serial()` block
+ with Serial():
+ assert global_dask_client() is None
+
+ assert global_dask_client() == client
| [FEA] Add DistributedClient API and use global client objects in Workflow and Dataset
**Is your feature request related to a problem? Please describe.**
In order to perform distributed ETL and/or spill data between device/host/disk, NVTabular currently requires the user to provide a Dask-Distributed (or Dask-CUDA) cluster. Furthermore, the `nvt.Workflow` (and sometimes `nvt.Dataset`) need to be defined with an explicit `client=` argument in order for the distributed cluster to be used. Although I feel strongly that it would be dangerous and messy for NVTabular to automatically spin up a distributed cluster by default, I do suspect that the user experience could be much better.
**Describe the solution you'd like**
To improve the user experience of distributed ETL with NVTabular, I propose:
1. Simple `LocalCluster`/`LocalCUDACluster`-wrapper APIs be added to NVTabular so that users can enable multi-GPU processing and/or spilling without interacting with distributed/dask_cuda. I am not yet sure of the ideal API to expose in NVTabular, but perhaps something like `DistributedClient` (wrapping `distributed.Client`). This API could be used to automatically generate a local cluster (if the address of an existing cluster is not provided), and we could add a `cpu=False` kwarg to toggle between gpu and cpu mode.
2. [**DONE** in #1318] Automatically detect and **use** an existing Dask client object. NVTabular already [checks for a global dask client](https://github.com/NVIDIA-Merlin/NVTabular/blob/34d01d7e6090d6029ac40010ed79e1558f18759c/nvtabular/workflow/workflow.py#L88) in both `Workflow ` and `Dataset`. However, these checks result in a UserWarning whenever a global client is detected. Instead of warning the user, I propose that NVTabular automatically **use** the client object when it is detected (which is actually the defult behavior in `dask.dataframe` and `dask.array` anyway).
**Describe alternatives you've considered**
The alternative is to continue requiring the user to deploy Dask clusters/clients with `distributed`/`dask_cuda`, and require them to include an explicit `client` argument when defining an `Workflow` objects.
| I think this is a great idea, and it would be very valuable. All our examples/notebooks would be automatically running in multi-gpu and customers would be able to use multi-gpu with minimum effort and knowledge.
Happy to work into this, unless Rick wants to tackle it.
>Happy to work into this, unless Rick wants to tackle it.
Happy to let you tackle it. We could also split the work if you want. I imagine that it would be best to add features in two distinct PRs: (1) automatically use a global client object, and (2) introduce the `DistributedClient` API (or some other NVT-based mechanism for users to say: "I want distributed execution")
We now have the second part of this issue resolved (automatic client detection/handling). In resolving this, we added a `nvtabular.utils.set_dask_client(client="auto")` utility.
Perhaps we could use the same machinery to solve the remaining challenge of this issue. That is: Add a `client="new"` option to `set_dask_client`, and suport `cluster_type=` and `cluster_options=` arguments. In the case that the user specifies `client="new"`, we would use `cluster_type` and `cluster_options` to automatically deploy a local cluster and define a new global client.
By default, using `client="new"`would result in a default `LocalCUDACluster` if gpus are detected on the machine (and a default `LocalCluster` otherwise). | 2022-02-02T20:08:12 |
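For reference, this is roughly how the execution managers that eventually landed are exercised in the new unit test (the `LocalCluster` here is just a stand-in for whatever Dask cluster is available):
```
from distributed import Client, LocalCluster

import nvtabular as nvt
from nvtabular.utils import global_dask_client

client = Client(LocalCluster(n_workers=2, processes=False))

with nvt.Distributed(client=client):   # make this client the global one
    assert global_dask_client() == client

with nvt.Serial():                     # temporarily force serial execution
    assert global_dask_client() is None
```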
NVIDIA-Merlin/NVTabular | 1,386 | NVIDIA-Merlin__NVTabular-1386 | [
"1325"
] | ce07052623ba895ab7656c7193260924e7743450 | diff --git a/nvtabular/ops/categorify.py b/nvtabular/ops/categorify.py
--- a/nvtabular/ops/categorify.py
+++ b/nvtabular/ops/categorify.py
@@ -1052,7 +1052,9 @@ def _write_uniques(dfs, base_path, col_selector: ColumnSelector, options: FitOpt
for col in col_selector.names:
name_size = col + "_size"
null_size = 0
- if name_size in df:
+ # Set null size if first element in `col` is
+ # null, and the `size` aggregation is known
+ if name_size in df and df[col].iloc[:1].isnull().any():
null_size = df[name_size].iloc[0]
if options.max_size:
max_emb_size = options.max_size
| diff --git a/tests/unit/ops/test_categorify.py b/tests/unit/ops/test_categorify.py
--- a/tests/unit/ops/test_categorify.py
+++ b/tests/unit/ops/test_categorify.py
@@ -559,3 +559,19 @@ def test_categorify_embedding_sizes(dataset, engine):
workflow.fit_transform(dataset)
assert get_embedding_sizes(workflow) == {"name-cat": (27, 16), "name-string_test": (27, 16)}
+
+
+def test_categorify_no_nulls():
+ # See https://github.com/NVIDIA-Merlin/NVTabular/issues/1325
+ df = nvt.dispatch._make_df(
+ {
+ "user_id": [1, 2, 3, 4, 6, 8, 5, 3] * 10,
+ "item_id": [2, 4, 4, 7, 5, 2, 5, 2] * 10,
+ },
+ )
+ workflow = nvt.Workflow(["user_id", "item_id"] >> ops.Categorify())
+ workflow.fit(nvt.Dataset(df))
+
+ df = pd.read_parquet("./categories/unique.user_id.parquet")
+ assert df["user_id"].iloc[:1].isnull().any()
+ assert df["user_id_size"][0] == 0
| [BUG] Categorify generates Null counts for a df without Nulls
**Describe the bug**
I get unexpected behaviour from the Categorify() op. The unique-value stats are not calculated correctly: they include a `null` entry with a non-zero size even though the dataset does not contain any nulls. Please see the repro example below for details.
**Steps/Code to reproduce bug**
Run the code below to repro the issue:
```
import nvtabular as nvt
from nvtabular import Workflow, ops
gdf = nvt.dispatch._make_df(
{
"user_id": [1, 2, 3, 4, 6, 8, 5, 3] *10,
"item_id": [2, 4, 4, 7, 5, 2, 5, 2] *10,
}
)
cats = ["user_id", "item_id"]
cat_features = cats >> ops.Categorify()
workflow = Workflow(cat_features)
workflow.fit_transform(nvt.Dataset(gdf)).to_ddf().compute()
```
THEN
```
import pandas as pd
pd.read_parquet('./categories/unique.user_id.parquet')
user_id user_id_size
0 <NA> 10
1 3 20
2 1 10
3 2 10
4 4 10
5 5 10
6 6 10
7 8 10
```
**Expected behavior**
There should not be null in the `unique.user_id.parquet` file.
**Environment details (please complete the following information):**
- Environment location: [Bare-metal, Docker, Cloud(specify cloud provider)]
- Method of NVTabular install: [conda, Docker, or from source]: Docker, `merlin-tensorflow-training:21.12` with the latest main branch pulled.
- If method of install is [Docker], provide `docker pull` & `docker run` commands used
**Additional context**
Add any other context about the problem here.
| @rjzamora The (known) impact of this is relatively low, but it does look weird and seems like it could cause problems. After talking to @rnyak about it, we're not 100% sure what the correct behavior is. Should the counts include NA with size 0, or omit it entirely when there are no NAs in the dataset?
>The (known) impact of this is relatively low
Seems pretty critical to me :) - I'll see if I can figure out what is going on here. Thank you for the nice/simple reproducer @rnyak ! | 2022-02-09T17:13:09 |
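For the record, the fix keeps the `<NA>` placeholder row in the uniques file but only records a non-zero null size when the column actually starts with a null. A toy pandas version of that guard (the frame below just imitates the shape of `unique.user_id.parquet`; the values are made up):
```
import pandas as pd

# no nulls in the uniques -> the first row is a real category, not <NA>
uniques = pd.DataFrame({"user_id": [3, 1, 2], "user_id_size": [20, 10, 10]})

null_size = 0
# only treat the first row's size as the null count when that row really is null
if "user_id_size" in uniques and uniques["user_id"].iloc[:1].isnull().any():
    null_size = uniques["user_id_size"].iloc[0]
print(null_size)  # 0 — previously the 20 would have been (wrongly) picked up
```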
NVIDIA-Merlin/NVTabular | 1,394 | NVIDIA-Merlin__NVTabular-1394 | [
"1392"
] | 424daee860b2b6f6d175a618bb840c7557d71fce | diff --git a/nvtabular/io/dataset.py b/nvtabular/io/dataset.py
--- a/nvtabular/io/dataset.py
+++ b/nvtabular/io/dataset.py
@@ -341,8 +341,7 @@ def __init__(
self.schema = Schema.load(schema_path)
elif (schema_path.parent / "schema.pbtxt").exists():
self.schema = Schema.load(schema_path.parent)
- else:
- self.infer_schema()
+ self.infer_schema()
else:
# df with no schema
self.infer_schema()
@@ -1133,7 +1132,11 @@ def infer_schema(self, n=1):
col_schema = ColumnSchema(column, dtype=dtype_val, _is_list=is_list, _is_ragged=is_list)
column_schemas.append(col_schema)
- self.schema = Schema(column_schemas)
+ schema = Schema(column_schemas)
+ if self.schema:
+ self.schema = self.schema + schema
+ else:
+ self.schema = schema
return self.schema
def sample_dtypes(self, n=1, annotate_lists=False):
| [BUG] NVTabular ETL multi-GPU breaks, if datatypes for column changes
**Describe the bug**
If I run the multi-GPU NVTabular ETL workflows, they throw errors when the datatype of a column changes.
Examples
Movielens: `Exception: "TypeError('Improperly matched output dtypes detected in rating, float64 and int8')"`
Criteo: `Exception: "TypeError('Improperly matched output dtypes detected in C1, int64 and int32')"`
**Steps/Code to reproduce bug**
Run multi-GPU Movielens: https://github.com/NVIDIA-Merlin/NVTabular/blob/main/examples/multi-gpu-movielens/01-03-MultiGPU-Download-Convert-ETL-with-NVTabular-Training-with-TensorFlow.ipynb
Run criteo: https://github.com/NVIDIA-Merlin/NVTabular/blob/main/examples/scaling-criteo/02-ETL-with-NVTabular.ipynb
**Environment details (please complete the following information):**
- merlin-tensorflow-training:22.02 with latest main branch
**Additional information:**
- I tested single-GPU movielens and it worked.
- I changed the single-GPU movielens example to use multi-GPUs and then it breaks
- if I change `ratings = nvt.ColumnSelector(["rating"]) >> nvt.ops.LambdaOp(lambda col: (col > 3.0).astype("int8"))` to `ratings = nvt.ColumnSelector(["rating"])`, then multi-GPU movielens works as well
| 2022-02-15T23:02:56 |
||
NVIDIA-Merlin/NVTabular | 1,398 | NVIDIA-Merlin__NVTabular-1398 | [
"1397"
] | 65d79d904e82233433820a613459cb34006598b5 | diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py
--- a/nvtabular/ops/__init__.py
+++ b/nvtabular/ops/__init__.py
@@ -36,6 +36,7 @@
from .logop import LogOp
from .normalize import Normalize, NormalizeMinMax
from .operator import ColumnSelector, Operator
+from .reduce_dtype_size import ReduceDtypeSize
from .rename import Rename
from .stat_operator import StatOperator
from .target_encoding import TargetEncoding
diff --git a/nvtabular/ops/reduce_dtype_size.py b/nvtabular/ops/reduce_dtype_size.py
new file mode 100644
--- /dev/null
+++ b/nvtabular/ops/reduce_dtype_size.py
@@ -0,0 +1,80 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import dask.dataframe as dd
+import numpy as np
+
+from nvtabular.graph.schema import Schema
+
+from ..dispatch import DataFrameType, annotate
+from .operator import ColumnSelector
+from .stat_operator import StatOperator
+
+_INT_DTYPES = [np.int8, np.int16, np.int32, np.int64]
+
+
+class ReduceDtypeSize(StatOperator):
+ """
+ ReduceDtypeSize changes the dtypes of numeric columns. For integer columns
+ this will choose a dtype such that the minimum and maximum values in the
+ column will fit. For float columns this will cast to a float32.
+ """
+
+ def __init__(self, float_dtype=np.float32):
+ super().__init__()
+ self.float_dtype = float_dtype
+ self.ranges = {}
+ self.dtypes = {}
+
+ @annotate("reduce_dtype_size_fit", color="green", domain="nvt_python")
+ def fit(self, col_selector: ColumnSelector, ddf: dd.DataFrame):
+ return {col: (ddf[col].min(), ddf[col].max()) for col in col_selector.names}
+
+ def fit_finalize(self, dask_stats):
+ self.ranges = dask_stats
+
+ def clear(self):
+ self.dtypes = {}
+ self.ranges = {}
+
+ @annotate("reduce_dtype_size_transform", color="darkgreen", domain="nvt_python")
+ def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
+ for col, dtype in self.dtypes.items():
+ df[col] = df[col].astype(dtype)
+ return df
+
+ def compute_output_schema(self, input_schema, selector, prev_output_schema=None):
+ if not self.ranges:
+ return input_schema
+
+ output_columns = []
+ for column, (min_value, max_value) in self.ranges.items():
+ column = input_schema[column]
+
+ dtype = column.dtype
+ if np.issubdtype(column.dtype, np.integer):
+ for possible_dtype in _INT_DTYPES:
+ dtype_range = np.iinfo(possible_dtype)
+ if min_value >= dtype_range.min and max_value <= dtype_range.max:
+ dtype = possible_dtype
+ break
+
+ elif np.issubdtype(column.dtype, np.float):
+ dtype = self.float_dtype
+
+ output_columns.append(column.with_dtype(dtype))
+
+ self.dtypes = {column.name: column.dtype for column in output_columns}
+ return Schema(output_columns)
| diff --git a/tests/unit/ops/test_reduce_dtype_size.py b/tests/unit/ops/test_reduce_dtype_size.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/ops/test_reduce_dtype_size.py
@@ -0,0 +1,49 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import numpy as np
+import pandas as pd
+import pytest
+
+import nvtabular as nvt
+from tests.conftest import assert_eq
+
+try:
+ import cudf
+
+ _CPU = [True, False]
+except ImportError:
+ _CPU = [True]
+
+
[email protected]("cpu", _CPU)
+def test_reduce_size(tmpdir, cpu):
+ df = pd.DataFrame()
+ if not cpu:
+ df = cudf.DataFrame(df)
+
+ df["int16"] = np.array([2 ** 15 - 1, 0], dtype="int64")
+ df["int32"] = np.array([2 ** 30, -(2 ** 30)], dtype="int64")
+ df["int64"] = np.array([2 ** 60, -(2 ** 60)], dtype="int64")
+ df["float32"] = np.array([1.0, 2.0], dtype="float64")
+
+ workflow = nvt.Workflow(list(df.columns) >> nvt.ops.ReduceDtypeSize())
+ transformed = workflow.fit_transform(nvt.Dataset(df)).to_ddf().compute()
+
+ expected = df
+ for column in df:
+ expected[column] = expected[column].astype(column)
+
+ assert_eq(expected, transformed)
| [FEA] Need optimized column dtypes outputted from NVTabular workflow
**Is your feature request related to a problem? Please describe.**
I am trying to do the following with NVTabular via a LambdaOp, which does not work because of the error here: https://github.com/NVIDIA-Merlin/NVTabular/issues/1396.
```
def reduce_mem(col):
if np.issubdtype(col.dtype, np.integer):
if (col.min()>-32768)&(col.max()<32768):
col = col.astype('int16')
elif (col.min()>-2147483648)&(col.max()<2147483648):
col = col.astype('int32')
if np.issubdtype(col.dtype, np.float):
col = col.astype('float32')
return col
reduced_cols = cols >> nvt.ops.LambdaOp(reduce_mem)
dataset = nvt.Dataset(data_path, part_size='500MB')
workflow = nvt.Workflow(reduced_cols)
workflow.fit(dataset)
# Write to new "shuffled" and "processed" dataset
workflow.transform(dataset).to_parquet(
OUTPUT_FOLDER,
out_files_per_proc=1,
shuffle=False
)
```
**Describe the solution you'd like**
Reducing column dtypes is a very common practice in DS pipelines, and using optimized data types should be a built-in feature of NVT. We should not have to create a LambdaOp or a custom op to integrate that process into our pipeline. It'd be very useful if it came naturally.
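As a rough sketch, this is what the requested usage could look like with the `ReduceDtypeSize` op added in the patch above, replacing the LambdaOp workaround. The input path, output folder, and column names below are placeholders rather than real data; the op's default `float_dtype=np.float32` comes from that patch.
```
import numpy as np
import nvtabular as nvt

# Placeholder paths and columns -- substitute the real pipeline's values.
data_path = "input_data/"
OUTPUT_FOLDER = "reduced_output/"
cols = ["col_a", "col_b", "col_c"]

# ReduceDtypeSize fits the min/max of each selected column, then casts integer
# columns to the smallest dtype that still fits and float columns to float32.
reduced_cols = cols >> nvt.ops.ReduceDtypeSize(float_dtype=np.float32)

dataset = nvt.Dataset(data_path, part_size="500MB")
workflow = nvt.Workflow(reduced_cols)
workflow.fit(dataset)

# Write to a new "processed" dataset with the reduced dtypes.
workflow.transform(dataset).to_parquet(
    OUTPUT_FOLDER,
    out_files_per_proc=1,
    shuffle=False,
)
```
Note that `workflow.fit` is what collects the per-column min/max values used to pick the reduced dtypes.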
| 2022-02-17T01:00:38 |
|
NVIDIA-Merlin/NVTabular | 1412 | NVIDIA-Merlin__NVTabular-1412 | [
"1401"
] | d106197f343a5e2eb39a47f03cd220481ef40559 | diff --git a/nvtabular/ops/__init__.py b/nvtabular/ops/__init__.py
--- a/nvtabular/ops/__init__.py
+++ b/nvtabular/ops/__init__.py
@@ -23,6 +23,7 @@
from .column_similarity import ColumnSimilarity
from .data_stats import DataStats
from .difference_lag import DifferenceLag
+from .drop_low_cardinality import DropLowCardinality
from .dropna import Dropna
from .fill import FillMedian, FillMissing
from .filter import Filter
diff --git a/nvtabular/ops/drop_low_cardinality.py b/nvtabular/ops/drop_low_cardinality.py
new file mode 100644
--- /dev/null
+++ b/nvtabular/ops/drop_low_cardinality.py
@@ -0,0 +1,47 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from merlin.schema import Schema, Tags
+
+from ..dispatch import DataFrameType, annotate
+from .operator import ColumnSelector, Operator
+
+
+class DropLowCardinality(Operator):
+ """
+ DropLowCardinality drops low cardinality categorical columns. This requires the
+ cardinality of these columns to be known in the schema - for instance by
+ first encoding these columns using Categorify.
+ """
+
+ def __init__(self, min_cardinality=2):
+ super().__init__()
+ self.min_cardinality = min_cardinality
+ self.to_drop = []
+
+ @annotate("drop_low_cardinality", color="darkgreen", domain="nvt_python")
+ def transform(self, col_selector: ColumnSelector, df: DataFrameType) -> DataFrameType:
+ return df.drop(self.to_drop, axis=1)
+
+ def compute_output_schema(self, input_schema, selector, prev_output_schema=None):
+ output_columns = []
+ for col in input_schema:
+ if Tags.CATEGORICAL in col.tags:
+ domain = col.int_domain
+ if domain and domain.max <= self.min_cardinality:
+ self.to_drop.append(col.name)
+ continue
+ output_columns.append(col)
+ return Schema(output_columns)
| diff --git a/tests/unit/ops/test_drop_low_cardinality.py b/tests/unit/ops/test_drop_low_cardinality.py
new file mode 100644
--- /dev/null
+++ b/tests/unit/ops/test_drop_low_cardinality.py
@@ -0,0 +1,50 @@
+#
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import pandas as pd
+import pytest
+
+import nvtabular as nvt
+from tests.conftest import assert_eq
+
+try:
+ import cudf
+
+ _CPU = [True, False]
+except ImportError:
+ _CPU = [True]
+
+
[email protected]("cpu", _CPU)
+def test_drop_low_cardinality(tmpdir, cpu):
+ df = pd.DataFrame()
+ if not cpu:
+ df = cudf.DataFrame(df)
+
+ df["col1"] = ["a", "a", "a", "a", "a"]
+ df["col2"] = ["a", "a", "a", "a", "b"]
+ df["col3"] = ["a", "a", "b", "b", "c"]
+
+ features = list(df.columns) >> nvt.ops.Categorify() >> nvt.ops.DropLowCardinality()
+
+ workflow = nvt.Workflow(features)
+ transformed = workflow.fit_transform(nvt.Dataset(df)).to_ddf().compute()
+
+ assert workflow.output_schema.column_names == ["col2", "col3"]
+
+ expected = df.drop(["col1"], axis=1)
+ expected["col2"] = [1, 1, 1, 1, 2]
+ expected["col3"] = [1, 1, 2, 2, 3]
+ assert_eq(transformed, expected)
| [FEA] Create an op to drop unwanted columns based on their `nunique` values
**Is your feature request related to a problem? Please describe.**
We are working on a dataset that has columns whose cardinality is 1. We want to drop these columns (a very common practice). Currently we use a function to do so:
```
def reduce_features(df):
RMV = []
for c in df.columns[1:]:
if df[c].nunique()<=1:
RMV.append(c)
df.drop(RMV,axis=1,inplace=True)
return
```
However, with large datasets we get OOM issues with cuDF, so we have to use pandas instead, which takes too long. It'd be useful to incorporate an op for this in NVTabular.
If there is a way to do it with NVT other than creating a new op, I'd appreciate the feedback. Thanks.
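For reference, a minimal sketch of how the `DropLowCardinality` op added in the patch above replaces that per-column loop. The tiny frame below is a placeholder mirroring the unit test, and the op assumes `Categorify` has already recorded each column's cardinality in the schema.
```
import pandas as pd
import nvtabular as nvt

# Placeholder frame mirroring the unit test -- col1 has a single unique value.
df = pd.DataFrame(
    {
        "col1": ["a", "a", "a", "a", "a"],
        "col2": ["a", "a", "a", "a", "b"],
        "col3": ["a", "a", "b", "b", "c"],
    }
)

# Categorify computes the uniques once and writes each column's cardinality
# into the schema; DropLowCardinality then drops the low-cardinality columns
# from the schema metadata alone -- no extra nunique() pass over the data.
features = list(df.columns) >> nvt.ops.Categorify() >> nvt.ops.DropLowCardinality()

workflow = nvt.Workflow(features)
transformed = workflow.fit_transform(nvt.Dataset(df)).to_ddf().compute()
# col1 is dropped; col2 and col3 remain as encoded integer columns.
```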
@rjzamora Hello. I created the FEA above. Is it challenging to implement that op, since it requires calculating `nunique()`? Thanks.
I guess the "challenge" here is that the `fit` method for this op would be doing something very similar to `Categorify.fit` (perhaps the exact same thing), and I don't think there is any obvious way to avoid duplicating the work in NVTabular (yet). Therefore, if you are dropping columns based on unique counts AND using Categorify, you will be sampling unique values twice. If you will **always** be calling `Categorify` on the columns that are not being dropped by your op, then I suppose you could just add a new option to `Categorify` to drop columns with too few uniques. | 2022-02-25T23:37:52 |
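The patch above takes essentially this route: rather than adding a `fit` that recomputes uniques, `DropLowCardinality.compute_output_schema` only inspects the cardinality metadata (`int_domain`) that `Categorify` has already attached to each column. A condensed paraphrase of that check, using the default `min_cardinality=2` from the patch (the helper name here is illustrative only):
```
from merlin.schema import Tags

def low_cardinality_columns(input_schema, min_cardinality=2):
    # Only schema metadata is consulted here -- no data pass, no nunique().
    to_drop = []
    for col in input_schema:
        if Tags.CATEGORICAL in col.tags:
            domain = col.int_domain
            if domain and domain.max <= min_cardinality:
                to_drop.append(col.name)
    return to_drop
```
The trade-off is that the op only works on columns that already carry cardinality information, i.e. it has to run after `Categorify`, as the docstring in the patch notes.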