author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
234,887 | 13.11.2020 13:46:13 | 0 | 994200955ed22635f32fd8cb9cf6345cf0b4b763 | CKAN plugin: add to_ckan methods | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -407,6 +407,17 @@ class Package(Metadata):\nstorage.write_package(self.to_copy(), force=force)\nreturn storage\n+ def to_ckan(self, *, base_url, dataset_id=None, api_key=None, force=False):\n+ return self.to_storage(\n+ system.create_storage(\n+ \"ckan_datastore\",\n+ base_url=base_url,\n+ dataset_id=dataset_id,\n+ api_key=api_key,\n+ ),\n+ force=force,\n+ )\n+\ndef to_sql(self, *, engine, prefix=\"\", namespace=None, force=False):\n\"\"\"Export package to SQL\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -828,6 +828,17 @@ class Resource(Metadata):\nstorage.write_resource(self.to_copy(), force=force)\nreturn storage\n+ def to_ckan(self, *, base_url, dataset_id=None, api_key=None, force=False):\n+ return self.to_storage(\n+ system.create_storage(\n+ \"ckan_datastore\",\n+ base_url=base_url,\n+ dataset_id=dataset_id,\n+ api_key=api_key,\n+ ),\n+ force=force,\n+ )\n+\ndef to_sql(self, *, engine, prefix=\"\", namespace=None, force=False):\n\"\"\"Export resource to SQL table\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | CKAN plugin: add to_ckan methods |
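A minimal usage sketch of the method this commit adds, with the keyword-only signature taken from the diff; the descriptor path, CKAN URL, dataset id, and API key below are placeholders, not values from the commit:

```python
from frictionless import Package

package = Package("datapackage.json")  # placeholder descriptor
package.to_ckan(
    base_url="https://demo.ckan.org",  # CKAN instance URL (placeholder)
    dataset_id="my-dataset",           # optional dataset id or slug (placeholder)
    api_key="my-api-key",              # optional API key (placeholder)
    force=False,                       # overwrite existing data when True
)
```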
234,887 | 13.11.2020 13:46:16 | 0 | 988292c3be271585df315b7858735d66e9a005dc | core: make Decimals JSON-serializable
when we call to_dict/to_list with json=True | [
{
"change_type": "MODIFY",
"old_path": "frictionless/row.py",
"new_path": "frictionless/row.py",
"diff": "from itertools import zip_longest\nfrom collections import OrderedDict\n+from decimal import Decimal\nfrom .helpers import cached_property\nfrom .parsers import JsonParser\nfrom . import errors\n@@ -222,6 +223,8 @@ class Row(OrderedDict):\ncell = self[field.name]\nif field.type not in JsonParser.native_types:\ncell, notes = field.write_cell(cell, ignore_missing=True)\n+ if isinstance(cell, Decimal):\n+ cell = float(cell)\nresult[field.name] = cell\nreturn result\nreturn dict(self)\n@@ -241,6 +244,8 @@ class Row(OrderedDict):\ncell = self[field.name]\nif field.type not in JsonParser.native_types:\ncell, notes = field.write_cell(cell, ignore_missing=True)\n+ if isinstance(cell, Decimal):\n+ cell = float(cell)\nresult.append(cell)\nreturn result\nreturn list(self.values())\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_row.py",
"new_path": "tests/test_row.py",
"diff": "+import json\n+from decimal import Decimal\nfrom frictionless import Row, Field, Schema, extract\n@@ -40,6 +42,20 @@ def test_to_list_with_json_null_values_issue_519():\n]\n+def test_decimal_to_json():\n+ row = Row(\n+ [Decimal(\"53.940135311587831\")],\n+ schema=Schema({\"fields\": [{\"name\": \"dec1\", \"type\": \"number\"}]}),\n+ field_positions=[1],\n+ row_position=1,\n+ row_number=1,\n+ )\n+ # all we really want to 'assert' here is that these methods run without throwing\n+ # TypeError: Object of type 'Decimal' is not JSON serializable\n+ assert isinstance(json.dumps(row.to_dict(json=True)), str)\n+ assert isinstance(json.dumps(row.to_list(json=True)), str)\n+\n+\n# Helpers\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | core: make Decimals JSON-serializable
when we call to_dict/to_list with json=True |
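The added test pins down the behaviour; a condensed sketch of the same round trip (the schema and row bookkeeping arguments mirror the test, not a real data file):

```python
import json
from decimal import Decimal
from frictionless import Row, Schema

row = Row(
    [Decimal("53.940135311587831")],
    schema=Schema({"fields": [{"name": "dec1", "type": "number"}]}),
    field_positions=[1],
    row_position=1,
    row_number=1,
)
# Before this commit both calls raised:
#   TypeError: Object of type 'Decimal' is not JSON serializable
print(json.dumps(row.to_dict(json=True)))
print(json.dumps(row.to_list(json=True)))
```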
234,887 | 13.11.2020 13:46:21 | 0 | f9396c98e39f4598bf12a7700b90277114fd70af | CKAN plugin: implement write_package and write_resource | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/ckan.py",
"new_path": "frictionless/plugins/ckan.py",
"diff": "@@ -184,11 +184,71 @@ class CkanStorage(Storage):\n# Write\n- def write_resource(self, resource, *, force=False, **options):\n- pass\n+ def _write_table(self, table, force=False):\n+ # Check for existence\n+ if table.name in self._read_table_names():\n+ if not force:\n+ note = f'Table \"{table.name}\" already exists'\n+ raise exceptions.FrictionlessException(errors.StorageError(note=note))\n+ self._write_table_remove(table.name)\n- def write_package(self, package, *, force=False, **options):\n- pass\n+ # Define tables\n+ self.__tables[table.name] = table\n+ datastore_dict = self._write_table_convert_table(table)\n+ datastore_url = \"{}/datastore_create\".format(self.__base_endpoint)\n+ self.__make_ckan_request(datastore_url, method=\"POST\", json=datastore_dict)\n+\n+ # Invalidate cache\n+ self.__bucket_cache = None\n+\n+ def _write_table_convert_table(self, table):\n+ schema = table.schema\n+ datastore_dict = {\"fields\": [], \"resource_id\": table.name, \"force\": True}\n+ for field in schema.fields:\n+ datastore_field = {\"id\": field.name}\n+ datastore_type = self._write_table_convert_field_type(field.type)\n+ if datastore_type:\n+ datastore_field[\"type\"] = datastore_type\n+ datastore_dict[\"fields\"].append(datastore_field)\n+ if schema.primary_key is not None:\n+ datastore_dict[\"primary_key\"] = schema.primary_key\n+ return datastore_dict\n+\n+ def _write_table_convert_field_type(self, type):\n+ DESCRIPTOR_TYPE_MAPPING = {\n+ \"number\": \"float\",\n+ \"string\": \"text\",\n+ \"integer\": \"int\",\n+ \"boolean\": \"bool\",\n+ \"object\": \"json\",\n+ \"array\": \"text[]\",\n+ \"geojson\": \"json\",\n+ \"date\": \"text\",\n+ \"time\": \"time\",\n+ \"year\": \"int\",\n+ \"datetime\": \"timestamp\",\n+ }\n+ return DESCRIPTOR_TYPE_MAPPING.get(type, \"text\")\n+\n+ def _write_table_row_stream(self, name, row_stream):\n+ table = self._read_table(name)\n+ datastore_upsert_url = \"{}/datastore_upsert\".format(self.__base_endpoint)\n+ records = [r.to_dict(json=True) for r in row_stream]\n+ params = {\n+ \"resource_id\": table.name,\n+ \"method\": \"insert\",\n+ \"force\": True,\n+ \"records\": records,\n+ }\n+ self.__make_ckan_request(datastore_upsert_url, method=\"POST\", json=params)\n+\n+ def write_resource(self, resource, *, force=False):\n+ self._write_table(resource, force=force)\n+ self._write_table_row_stream(resource.name, resource.read_row_stream())\n+\n+ def write_package(self, package, *, force=False):\n+ for resource in package.resources:\n+ self.write_resource(resource, force=force)\n# Delete\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | CKAN plugin: implement write_package and write_resource |
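With both write methods in place, the storage can also be driven without the `to_ckan` helpers; a sketch assuming `system` is importable from the package root and using the `ckan_datastore` plugin name and keyword arguments shown in the earlier diff (URL, dataset id, and key are placeholders):

```python
from frictionless import Package, system

storage = system.create_storage(
    "ckan_datastore",
    base_url="https://demo.ckan.org",  # placeholder
    dataset_id="my-dataset",           # placeholder
    api_key="my-api-key",              # placeholder
)
package = Package("datapackage.json")  # placeholder descriptor
# write_package calls write_resource per resource, which issues a
# datastore_create request and then a datastore_upsert request.
storage.write_package(package, force=True)
```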
234,887 | 13.11.2020 13:46:24 | 0 | 8b799ec9ec699f3e649aef972179bf2a7e97ca1e | CKAN plugin: add tests for write methods
add tests for write methods
restructure existing test code | [
{
"change_type": "DELETE",
"old_path": "data/ckan_mock_responses/datastore_delete.json",
"new_path": null,
"diff": "-{\n- \"help\": \"https://demo.ckan.org/api/3/action/help_show?name=datastore_delete\",\n- \"success\": true,\n- \"result\": {\n- \"resource_id\": \"79843e49-7974-411c-8eb5-fb2d1111d707\"\n- }\n-}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/ckan_mock_responses/doesnt_matter.json",
"diff": "+{\n+ \"success\": true,\n+ \"comment\": \"Use this mock when it doesn't matter what is in the response. If we're just looking for a 200 OK status and success: true this mock will do.\"\n+}\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | CKAN plugin: add tests for write methods
- add tests for write methods
- restructure existing test code |
234,887 | 13.11.2020 13:46:27 | 0 | c4653e0836f321693c1e8fd7da29cb51f36e5263 | CKAN plugin: add docstrings | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -286,6 +286,13 @@ class Package(Metadata):\n@staticmethod\ndef from_ckan(*, base_url, dataset_id, api_key=None):\n+ \"\"\"Import package from CKAN\n+\n+ Parameters:\n+ base_url (str): (required) URL for CKAN instance (e.g: https://demo.ckan.org/ )\n+ dataset_id (str): (required) ID or slug of dataset to fetch\n+ api_key (str): (optional) Your CKAN API key\n+ \"\"\"\nreturn Package.from_storage(\nsystem.create_storage(\n\"ckan_datastore\",\n@@ -408,6 +415,14 @@ class Package(Metadata):\nreturn storage\ndef to_ckan(self, *, base_url, dataset_id=None, api_key=None, force=False):\n+ \"\"\"Export package to CKAN\n+\n+ Parameters:\n+ base_url (str): (required) URL for CKAN instance (e.g: https://demo.ckan.org/ )\n+ dataset_id (str): (optional) ID or slug of dataset this resource belongs to\n+ api_key (str): (optional) Your CKAN API key\n+ force (bool): (optional) overwrite existing data\n+ \"\"\"\nreturn self.to_storage(\nsystem.create_storage(\n\"ckan_datastore\",\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -637,6 +637,13 @@ class Resource(Metadata):\n@staticmethod\ndef from_ckan(*, base_url, resource_id, api_key=None):\n+ \"\"\"Import resource from CKAN\n+\n+ Parameters:\n+ base_url (str): (required) URL for CKAN instance (e.g: https://demo.ckan.org/ )\n+ resource_id (str): (required) ID of resource to fetch\n+ api_key (str): (optional) Your CKAN API key\n+ \"\"\"\nreturn Resource.from_storage(\nsystem.create_storage(\n\"ckan_datastore\",\n@@ -829,6 +836,14 @@ class Resource(Metadata):\nreturn storage\ndef to_ckan(self, *, base_url, dataset_id=None, api_key=None, force=False):\n+ \"\"\"Export resource to CKAN\n+\n+ Parameters:\n+ base_url (str): (required) URL for CKAN instance (e.g: https://demo.ckan.org/ )\n+ dataset_id (str): (optional) ID or slug of dataset this resource belongs to\n+ api_key (str): (optional) Your CKAN API key\n+ force (bool): (optional) overwrite existing data\n+ \"\"\"\nreturn self.to_storage(\nsystem.create_storage(\n\"ckan_datastore\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | CKAN plugin: add docstrings |
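The docstrings document two import/export pairs; a sketch of the read direction they describe (all argument values are placeholders):

```python
from frictionless import Package, Resource

# Import a whole dataset from a CKAN instance...
package = Package.from_ckan(
    base_url="https://demo.ckan.org",  # placeholder
    dataset_id="my-dataset",           # placeholder
)

# ...or a single resource by its CKAN resource id.
resource = Resource.from_ckan(
    base_url="https://demo.ckan.org",  # placeholder
    resource_id="my-resource-id",      # placeholder
)
```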
234,887 | 13.11.2020 13:46:30 | 0 | 0bdcde39e3c5183613718e3774dc423c9d296463 | CKAN plugin: make delete_resource safer
delete from self.__tables after the datastore_delete request
this way if we throw a HTTPError calling datastore_delete
we won't end up in an inconsistent state | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/ckan.py",
"new_path": "frictionless/plugins/ckan.py",
"diff": "@@ -259,15 +259,15 @@ class CkanStorage(Storage):\nnote = f'Table \"{name}\" does not exist'\nraise exceptions.FrictionlessException(errors.StorageError(note=note))\n- # Remove from table\n- if name in self.__tables:\n- del self.__tables[name]\n-\n# Remove from ckan\ndatastore_delete_url = \"{}/datastore_delete\".format(self.__base_endpoint)\nparams = {\"resource_id\": name, \"force\": True}\nself.__make_ckan_request(datastore_delete_url, method=\"POST\", json=params)\n+ # Remove from table\n+ if name in self.__tables:\n+ del self.__tables[name]\n+\n# Invalidate cache\nself.__bucket_cache = None\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | CKAN plugin: make delete_resource safer
delete from self.__tables after the datastore_delete request
this way if we throw a HTTPError calling datastore_delete
we won't end up in an inconsistent state |
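The reordering is a small crash-safety pattern; a hedged sketch of the idea with hypothetical helper names (`request` and `tables` stand in for the plugin's private machinery, they are not the real API):

```python
def delete_table(storage, name):
    # Hit the remote API first; this call may raise an HTTPError.
    storage.request("datastore_delete", resource_id=name, force=True)
    # Mutate local state only after the remote call succeeded, so a
    # failure cannot leave the cache claiming the table is gone.
    storage.tables.pop(name, None)
```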
234,912 | 20.11.2020 13:42:43 | -3,600 | 22408d18695ef9576f3413b6aa1b24fe1ee321c9 | Implemented resource.write | [
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -21,7 +21,6 @@ from . import config\n# TODO: rework path/data/location etc\n# TODO: rework path/data updates syncing\n-# TODO: implement save/write as we have table.write\nclass Resource(Metadata):\n\"\"\"Resource representation.\n@@ -609,10 +608,23 @@ class Resource(Metadata):\nlookup[source_name][source_key].add(cells)\nreturn lookup\n+ # Write\n+\n+ def write(self, target=None, **options):\n+ \"\"\"Write the resource to the target\n+\n+ Parameters:\n+ target (str): target path\n+ **options: subset of Resource's constructor options\n+ \"\"\"\n+ with self.to_table() as table:\n+ return table.write(target, **options)\n+\n# Import/Export\n@staticmethod\ndef from_source(source, **options):\n+ \"\"\"Create a resource from path OR data\"\"\"\nif source is None:\nreturn Resource(data=[], **options)\nelif isinstance(source, str):\n@@ -623,6 +635,7 @@ class Resource(Metadata):\n@staticmethod\ndef from_petl(storage, *, view, **options):\n+ \"\"\"Create a resource from PETL container\"\"\"\nreturn Resource(data=view, **options)\n@staticmethod\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Implemented resource.write (#537) |
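A sketch of the new method, per its docstring (paths are placeholders; `**options` forwards a subset of the Resource constructor options):

```python
from frictionless import Resource

resource = Resource(path="data/table.csv")  # placeholder source
# Internally opens a Table and delegates to table.write.
resource.write("tmp/table.json")            # placeholder target
```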
234,912 | 23.11.2020 10:34:08 | -3,600 | b7288a31d68c89266444d39ec0ecb21714119d16 | Fixed text path parsing | [
{
"change_type": "MODIFY",
"old_path": "frictionless/location.py",
"new_path": "frictionless/location.py",
"diff": "@@ -37,6 +37,8 @@ class Location:\ndetect = helpers.detect_source_scheme_and_format(new_source)\nscheme = detect[0] or config.DEFAULT_SCHEME\nformat = detect[1] or config.DEFAULT_FORMAT\n+ if scheme == \"text\" and source.endswith(f\".{format}\"):\n+ source = source[: -(len(format) + 1)]\n# Set attributes\nself.__name = name\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/loaders/test_text.py",
"new_path": "tests/loaders/test_text.py",
"diff": "@@ -9,3 +9,10 @@ def test_table_text():\nwith Table(source, format=\"csv\") as table:\nassert table.header == [\"header1\", \"header2\"]\nassert table.read_data() == [[\"value1\", \"value2\"], [\"value3\", \"value4\"]]\n+\n+\n+def test_table_text_format_in_path():\n+ source = \"text://header1,header2\\nvalue1,value2\\nvalue3,value4.csv\"\n+ with Table(source) as table:\n+ assert table.header == [\"header1\", \"header2\"]\n+ assert table.read_data() == [[\"value1\", \"value2\"], [\"value3\", \"value4\"]]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed text path parsing (#543) |
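The new test doubles as a usage example; the trailing ".csv" is now consumed as the format instead of leaking into the data:

```python
from frictionless import Table

source = "text://header1,header2\nvalue1,value2\nvalue3,value4.csv"
with Table(source) as table:
    print(table.header)       # ['header1', 'header2']
    print(table.read_data())  # [['value1', 'value2'], ['value3', 'value4']]
```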
234,912 | 25.11.2020 10:09:27 | -3,600 | e813a8e1249aa47d0b776c78f543fb1d6b52eaed | Deduplicate resource names on package.infer | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -119,7 +119,7 @@ class Package(Metadata):\n\"\"\"\nreturn self.get(\"profile\", config.DEFAULT_PACKAGE_PROFILE)\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef hashing(self):\n\"\"\"\nReturns:\n@@ -127,7 +127,7 @@ class Package(Metadata):\n\"\"\"\nreturn self.__hashing\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef basepath(self):\n\"\"\"\nReturns:\n@@ -135,16 +135,15 @@ class Package(Metadata):\n\"\"\"\nreturn self.__basepath\n- @property\n+ @Metadata.property(cache=False, write=False)\ndef onerror(self):\n\"\"\"\nReturns:\nignore|warn|raise: on error bahaviour\n\"\"\"\n- assert self.__onerror in [\"ignore\", \"warn\", \"raise\"]\nreturn self.__onerror\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef trusted(self):\n\"\"\"\nReturns:\n@@ -163,7 +162,7 @@ class Package(Metadata):\nresources = self.get(\"resources\", [])\nreturn self.metadata_attach(\"resources\", resources)\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef resource_names(self):\n\"\"\"\nReturns:\n@@ -273,6 +272,15 @@ class Package(Metadata):\nfor resource in self.resources:\nresource.infer(only_sample=only_sample)\n+ # Deduplicate names\n+ if len(self.resource_names) != len(set(self.resource_names)):\n+ seen_names = []\n+ for index, name in enumerate(self.resource_names):\n+ count = seen_names.count(name) + 1\n+ if count > 1:\n+ self.resources[index].name = \"%s%s\" % (name, count)\n+ seen_names.append(name)\n+\n# Import/Export\n@staticmethod\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -313,7 +313,7 @@ class Resource(Metadata):\nstats = {\"hash\": \"\", \"bytes\": 0, \"fields\": 0, \"rows\": 0}\nreturn self.metadata_attach(\"stats\", self.get(\"stats\", stats))\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef basepath(self):\n\"\"\"\nReturns\n@@ -321,7 +321,7 @@ class Resource(Metadata):\n\"\"\"\nreturn self.__basepath\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef fullpath(self):\n\"\"\"\nReturns\n@@ -333,16 +333,15 @@ class Resource(Metadata):\nreturn \"multipart\"\nreturn self.source\n- @property\n+ @Metadata.property(cache=False, write=False)\ndef onerror(self):\n\"\"\"\nReturns:\nignore|warn|raise: on error bahaviour\n\"\"\"\n- assert self.__onerror in [\"ignore\", \"warn\", \"raise\"]\nreturn self.__onerror\n- @property\n+ @Metadata.property(cache=False, write=False)\ndef trusted(self):\n\"\"\"\nReturns:\n@@ -350,7 +349,7 @@ class Resource(Metadata):\n\"\"\"\nreturn self.__trusted\n- @property\n+ @Metadata.property(cache=False, write=False)\ndef package(self):\n\"\"\"\nReturns:\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/schema.py",
"new_path": "frictionless/schema.py",
"diff": "@@ -92,7 +92,7 @@ class Schema(Metadata):\nfields = self.get(\"fields\", [])\nreturn self.metadata_attach(\"fields\", fields)\n- @Metadata.property(write=False)\n+ @Metadata.property(cache=False, write=False)\ndef field_names(self):\n\"\"\"\nReturns:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_package.py",
"new_path": "tests/test_package.py",
"diff": "@@ -429,6 +429,20 @@ def test_package_infer_empty_file():\nassert package.resources[0].stats[\"bytes\"] == 0\n+def test_package_infer_duplicate_resource_names_issue_530():\n+ package = Package(\n+ resources=[\n+ Resource(path=\"data/chunk1.csv\"),\n+ Resource(path=\"data/chunk2.csv\"),\n+ Resource(path=\"data/tables/chunk1.csv\"),\n+ Resource(path=\"data/tables/chunk2.csv\"),\n+ ]\n+ )\n+ package.infer()\n+ assert len(set(package.resource_names)) == 4\n+ assert package.resource_names == [\"chunk1\", \"chunk2\", \"chunk12\", \"chunk22\"]\n+\n+\n# Import/Export\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Deduplicate resource names on package.infer (#547) |
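The added test shows the deduplication rule in action: a repeated name gets its occurrence count appended:

```python
from frictionless import Package, Resource

package = Package(
    resources=[
        Resource(path="data/chunk1.csv"),
        Resource(path="data/chunk2.csv"),
        Resource(path="data/tables/chunk1.csv"),
        Resource(path="data/tables/chunk2.csv"),
    ]
)
package.infer()
print(package.resource_names)  # ['chunk1', 'chunk2', 'chunk12', 'chunk22']
```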
234,912 | 25.11.2020 10:19:52 | -3,600 | 33666779310fca483ea5a0b48728443a2d3125dd | Slugify resource names on resource.infer | [
{
"change_type": "MODIFY",
"old_path": "frictionless/helpers.py",
"new_path": "frictionless/helpers.py",
"diff": "@@ -9,6 +9,7 @@ import chardet\nimport tempfile\nimport datetime\nimport stringcase\n+from slugify import slugify\nfrom inspect import signature\nfrom importlib import import_module\nfrom urllib.parse import urlparse, parse_qs\n@@ -136,13 +137,14 @@ def compile_regex(items):\nreturn result\n-# TODO: use slugify\ndef detect_name(source):\n+ name = \"memory\"\nif isinstance(source, str) and \"\\n\" not in source:\n- return os.path.splitext(os.path.basename(source))[0]\n- if isinstance(source, list) and source and isinstance(source[0], str):\n- return os.path.splitext(os.path.basename(source[0]))[0]\n- return \"memory\"\n+ name = os.path.splitext(os.path.basename(source))[0]\n+ elif isinstance(source, list) and source and isinstance(source[0], str):\n+ name = os.path.splitext(os.path.basename(source[0]))[0]\n+ name = slugify(name).lower()\n+ return name\ndef detect_basepath(descriptor):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_resource.py",
"new_path": "tests/test_resource.py",
"diff": "@@ -606,6 +606,13 @@ def test_resource_infer_from_path():\nassert resource.path == \"data/table.csv\"\n+def test_resource_infer_not_slugified_name_issue_531():\n+ resource = Resource()\n+ resource.infer(\"data/Table With Data.csv\")\n+ assert resource.metadata_valid\n+ assert resource.name == \"table-with-data\"\n+\n+\n# Import/Export\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Slugify resource names on resource.infer (#548) |
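Per the new test, inferred names are now slugified into valid, lower-case identifiers:

```python
from frictionless import Resource

resource = Resource()
resource.infer("data/Table With Data.csv")
print(resource.name)  # 'table-with-data'
```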
234,912 | 25.11.2020 11:10:23 | -3,600 | e993bc77b4ad7eacaa8445ec725a966a6e610399 | Force utf-8 for saving metadata json/yaml | [
{
"change_type": "MODIFY",
"old_path": "frictionless/metadata.py",
"new_path": "frictionless/metadata.py",
"diff": "@@ -112,7 +112,9 @@ class Metadata(helpers.ControlledDict):\nself.to_dict(), indent=2, ensure_ascii=False, cls=encoder_class\n)\ntry:\n- with tempfile.NamedTemporaryFile(\"wt\", delete=False) as file:\n+ with tempfile.NamedTemporaryFile(\n+ \"wt\", delete=False, encoding=\"utf-8\"\n+ ) as file:\njson.dump(\nself.to_dict(), file, indent=2, ensure_ascii=False, cls=encoder_class\n)\n@@ -133,7 +135,9 @@ class Metadata(helpers.ControlledDict):\nif not target:\nreturn yaml.dump(helpers.deepsafe(self.to_dict()), Dumper=IndentDumper)\ntry:\n- with tempfile.NamedTemporaryFile(\"wt\", delete=False) as file:\n+ with tempfile.NamedTemporaryFile(\n+ \"wt\", delete=False, encoding=\"utf-8\"\n+ ) as file:\nyaml.dump(helpers.deepsafe(self.to_dict()), file, Dumper=IndentDumper)\nhelpers.move_file(file.name, target)\nexcept Exception as exc:\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Force utf-8 for saving metadata json/yaml (#549) |
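A sketch of the code path this touches, assuming the usual `to_json`/`to_yaml` entry points sit over this `Metadata` method (descriptor and target paths are placeholders):

```python
from frictionless import Package

package = Package("datapackage.json")  # placeholder descriptor
# The temporary file behind these writers is now opened with
# encoding="utf-8", so non-ASCII metadata survives on any locale.
package.to_json("tmp/datapackage.json")
package.to_yaml("tmp/datapackage.yaml")
```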
234,912 | 03.12.2020 15:34:47 | -3,600 | 495a7d6cff2c2d1ca9f01c7f9ff9f0f46da7e2ba | Fixed Header not being an original one | [
{
"change_type": "MODIFY",
"old_path": "frictionless/header.py",
"new_path": "frictionless/header.py",
"diff": "@@ -21,6 +21,7 @@ class Header(list):\nassert len(field_positions) in (len(cells), len(schema.fields))\n# Set attributes\n+ original_cells = cells.copy()\nfields = schema.fields\nself.__schema = schema\nself.__field_positions = field_positions\n@@ -35,7 +36,7 @@ class Header(list):\nself.__errors.append(\nerrors.ExtraHeaderError(\nnote=\"\",\n- cells=cells,\n+ cells=list(map(str, original_cells)),\ncell=\"\",\nfield_name=\"\",\nfield_number=len(fields) + field_position - start,\n@@ -52,7 +53,7 @@ class Header(list):\nself.__errors.append(\nerrors.MissingHeaderError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(str, original_cells)),\ncell=\"\",\nfield_name=field.name,\nfield_number=field_number,\n@@ -71,7 +72,7 @@ class Header(list):\nself.__errors.append(\nerrors.BlankHeaderError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(str, original_cells)),\ncell=\"\",\nfield_name=field.name,\nfield_number=field_number,\n@@ -94,7 +95,7 @@ class Header(list):\nself.__errors.append(\nerrors.DuplicateHeaderError(\nnote=note,\n- cells=list(map(str, cells)),\n+ cells=list(map(str, original_cells)),\ncell=str(cells[field_number - 1]),\nfield_name=field.name,\nfield_number=field_number,\n@@ -109,7 +110,7 @@ class Header(list):\nself.__errors.append(\nerrors.NonMatchingHeaderError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(str, original_cells)),\ncell=str(cell),\nfield_name=field.name,\nfield_number=field_number,\n@@ -118,7 +119,7 @@ class Header(list):\n)\n# Save header\n- super().__init__(cells)\n+ super().__init__(original_cells)\n@cached_property\ndef schema(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_header.py",
"new_path": "tests/test_header.py",
"diff": "-from frictionless import Header, Field, Schema\n+from frictionless import Header, Field, Schema, Resource\n# General\n@@ -11,6 +11,22 @@ def test_basic():\nassert header == [\"field1\", \"field2\", \"field3\"]\n+def test_extra_header():\n+ schema = Schema(fields=[Field(name=\"id\")])\n+ resource = Resource(path=\"data/table.csv\", schema=schema)\n+ header = resource.read_header()\n+ assert header == [\"id\", \"name\"]\n+ assert header.valid is False\n+\n+\n+def test_missing_header():\n+ schema = Schema(fields=[Field(name=\"id\"), Field(name=\"name\"), Field(name=\"extra\")])\n+ resource = Resource(path=\"data/table.csv\", schema=schema)\n+ header = resource.read_header()\n+ assert header == [\"id\", \"name\"]\n+ assert header.valid is False\n+\n+\n# Helpers\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed Header not being an original one (#572) |
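The new tests show the guarantee: the header keeps the file's original cells even when the schema disagrees, while `valid` reports the mismatch:

```python
from frictionless import Field, Resource, Schema

# The schema declares fewer fields than the file provides.
schema = Schema(fields=[Field(name="id")])
resource = Resource(path="data/table.csv", schema=schema)
header = resource.read_header()
print(header)        # ['id', 'name'] -- the original cells
print(header.valid)  # False -- an extra-header error is recorded
```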
234,912 | 06.12.2020 10:36:43 | -3,600 | c58486a07ac4fbc3a4754962663026a8b7de00d8 | Drafted support for writing Multipart Data | [
{
"change_type": "MODIFY",
"old_path": "docs/build/working-with-multipart/README.ipynb",
"new_path": "docs/build/working-with-multipart/README.ipynb",
"diff": "\"execution_count\": 1,\n\"metadata\": {\n\"execution\": {\n- \"iopub.execute_input\": \"2020-12-04T10:07:28.430573Z\",\n- \"iopub.status.busy\": \"2020-12-04T10:07:28.429292Z\",\n- \"iopub.status.idle\": \"2020-12-04T10:07:29.028754Z\",\n- \"shell.execute_reply\": \"2020-12-04T10:07:29.027719Z\"\n+ \"iopub.execute_input\": \"2020-12-06T09:33:14.033181Z\",\n+ \"iopub.status.busy\": \"2020-12-06T09:33:14.032286Z\",\n+ \"iopub.status.idle\": \"2020-12-06T09:33:14.630440Z\",\n+ \"shell.execute_reply\": \"2020-12-06T09:33:14.629717Z\"\n}\n},\n\"outputs\": [\n\"source\": [\n\"## Writing Multipart Data\\n\",\n\"\\n\",\n- \"> Not supported\\n\",\n+ \"The actual for writing:\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"```py\\n\",\n+ \"from frictionless import Resource\\n\",\n\"\\n\",\n+ \"resource = Resource(path='data/table.json')\\n\",\n+ \"resource.write('tmp/table{number}.json', scheme=\\\"multipart\\\", control={\\\"chunkSize\\\": 1000000})\\n\",\n+ \"```\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n\"## Configuring Local Data\\n\",\n\"\\n\",\n- \"> Not supported\"\n+ \"There is a control to configure how Frictionless read files using this scheme. For example:\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"```py\\n\",\n+ \"from frictionless import Resource\\n\",\n+ \"from frictionless.plugins.multipart import MultipartControl\\n\",\n+ \"\\n\",\n+ \"control = MultipartControl(chunk_size=1000000)\\n\",\n+ \"resource = Resource(data=[['id', 'name'], [1, 'english'], [2, 'german']])\\n\",\n+ \"resource.write('tmp/table{number}.json', scheme=\\\"multipart\\\", control=control)\\n\",\n+ \"```\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"References:\\n\",\n+ \"- [Multipart Control](https://frictionlessdata.io/tooling/python/schemes-reference/#multipart)\"\n]\n}\n],\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/build/working-with-multipart/README.md",
"new_path": "docs/build/working-with-multipart/README.md",
"diff": "@@ -21,8 +21,29 @@ print(resource.read_rows())\n## Writing Multipart Data\n-> Not supported\n+The actual for writing:\n+\n+```py\n+from frictionless import Resource\n+\n+resource = Resource(path='data/table.json')\n+resource.write('tmp/table{number}.json', scheme=\"multipart\", control={\"chunkSize\": 1000000})\n+```\n+\n## Configuring Local Data\n-> Not supported\n\\ No newline at end of file\n+There is a control to configure how Frictionless read files using this scheme. For example:\n+\n+```py\n+from frictionless import Resource\n+from frictionless.plugins.multipart import MultipartControl\n+\n+control = MultipartControl(chunk_size=1000000)\n+resource = Resource(data=[['id', 'name'], [1, 'english'], [2, 'german']])\n+resource.write('tmp/table{number}.json', scheme=\"multipart\", control=control)\n+```\n+\n+\n+References:\n+- [Multipart Control](https://frictionlessdata.io/tooling/python/schemes-reference/#multipart)\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/working-with-multipart.md",
"new_path": "docs/working-with-multipart.md",
"diff": "@@ -17,8 +17,27 @@ print(resource.read_rows())\n## Writing Multipart Data\n-> Not supported\n+The actual for writing:\n+\n+```py\n+from frictionless import Resource\n+\n+resource = Resource(path='data/table.json')\n+resource.write('tmp/table{number}.json', scheme=\"multipart\", control={\"chunkSize\": 1000000})\n+```\n## Configuring Local Data\n-> Not supported\n+There is a control to configure how Frictionless read files using this scheme. For example:\n+\n+```py\n+from frictionless import Resource\n+from frictionless.plugins.multipart import MultipartControl\n+\n+control = MultipartControl(chunk_size=1000000)\n+resource = Resource(data=[['id', 'name'], [1, 'english'], [2, 'german']])\n+resource.write('tmp/table{number}.json', scheme=\"multipart\", control=control)\n+```\n+\n+References:\n+- [Multipart Control](https://frictionlessdata.io/tooling/python/schemes-reference/#multipart)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/config.py",
"new_path": "frictionless/config.py",
"diff": "@@ -51,6 +51,7 @@ DEFAULT_INFER_VOLUME = 100\nDEFAULT_INFER_CONFIDENCE = 0.9\nDEFAULT_INFER_ENCODING_VOLUME = 10000\nDEFAULT_INFER_ENCODING_CONFIDENCE = 0.5\n+DEFAULT_MULTIPART_CHUNK_SIZE = 100000000\nDEFAULT_RESOURCE_PROFILE = \"data-resource\"\nDEFAULT_PACKAGE_PROFILE = \"data-package\"\nDEFAULT_TRUE_VALUES = [\"true\", \"True\", \"TRUE\", \"1\"]\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/aws.py",
"new_path": "frictionless/plugins/aws.py",
"diff": "@@ -55,7 +55,6 @@ class S3Control(Control):\n@property\ndef endpoint_url(self):\n- super().expand()\nreturn (\nself.get(\"endpointUrl\")\nor os.environ.get(\"S3_ENDPOINT_URL\")\n@@ -66,6 +65,7 @@ class S3Control(Control):\ndef expand(self):\n\"\"\"Expand metadata\"\"\"\n+ super().expand()\nself.setdefault(\"endpointUrl\", self.endpoint_url)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/multipart.py",
"new_path": "frictionless/plugins/multipart.py",
"diff": "-from ..exception import FrictionlessException\n+import tempfile\nfrom ..resource import Resource\nfrom ..control import Control\nfrom ..plugin import Plugin\nfrom ..loader import Loader\nfrom ..system import system\n-from .. import errors\n+from .. import helpers\n+from .. import config\n# Plugin\n@@ -46,7 +47,33 @@ class MultipartControl(Control):\n\"\"\"\n- pass\n+ def __init__(\n+ self, descriptor=None, chunk_size=None, newline=None, detect_encoding=None\n+ ):\n+ self.setinitial(\"chunkSize\", chunk_size)\n+ super().__init__(descriptor, newline=newline, detect_encoding=detect_encoding)\n+\n+ @property\n+ def chunk_size(self):\n+ return self.get(\"chunkSize\", config.DEFAULT_MULTIPART_CHUNK_SIZE)\n+\n+ # Expand\n+\n+ def expand(self):\n+ \"\"\"Expand metadata\"\"\"\n+ super().expand()\n+ self.setdefault(\"chunkSize\", self.chunk_size)\n+\n+ # Metadata\n+\n+ metadata_profile = { # type: ignore\n+ \"type\": \"object\",\n+ \"properties\": {\n+ \"chunkSize\": {\"type\": \"number\"},\n+ \"newline\": {\"type\": \"string\"},\n+ \"detectEncoding\": {},\n+ },\n+ }\n# Loader\n@@ -66,15 +93,26 @@ class MultipartLoader(Loader):\ndef read_byte_stream_create(self):\nsource = self.resource.source\nremote = self.resource.remote\n+ # TODO: review\nheadless = self.resource.get(\"dialect\", {}).get(\"header\") is False\n+ headless = headless or self.resource.format != \"csv\"\nbyte_stream = MultipartByteStream(source, remote=remote, headless=headless)\nreturn byte_stream\n# Write\n+ # TODO: raise an exception for csv/header situation?\ndef write_byte_stream_save(self, byte_stream):\n- error = errors.SchemeError(note=\"Writing to Multipart Data is not supported\")\n- raise FrictionlessException(error)\n+ number = 0\n+ while True:\n+ bytes = byte_stream.read(self.resource.control.chunk_size)\n+ if not bytes:\n+ break\n+ number += 1\n+ path = self.resource.source.format(number=number)\n+ with tempfile.NamedTemporaryFile(delete=False) as file:\n+ file.write(bytes)\n+ helpers.move_file(file.name, path)\n# Internal\n@@ -126,6 +164,7 @@ class MultipartByteStream:\nassert offset == 0\nself.__line_stream = self.read_line_stream()\n+ # TODO: review\ndef read(self, size):\nres = b\"\"\nwhile True:\n@@ -137,12 +176,11 @@ class MultipartByteStream:\nbreak\nreturn res\n+ # TODO: review (this situation with header/no-header/skipping like is not yet clear)\ndef read_line_stream(self):\nfor number, path in enumerate(self.__path, start=1):\n- with system.create_loader(Resource(path=path)) as loader:\n+ with system.create_loader(Resource(path=path, trusted=True)) as loader:\nfor line_number, line in enumerate(loader.byte_stream, start=1):\n- if not line.endswith(b\"\\n\"):\n- line += b\"\\n\"\nif not self.__headless and number > 1 and line_number == 1:\ncontinue\nyield line\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Drafted support for writing Multipart Data (#583) |
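The updated docs carry a usage snippet; reproduced here as the writing entry point this commit drafts (target pattern and chunk size come from the doc diff):

```python
from frictionless import Resource
from frictionless.plugins.multipart import MultipartControl

control = MultipartControl(chunk_size=1000000)
resource = Resource(data=[["id", "name"], [1, "english"], [2, "german"]])
# {number} is filled per chunk: tmp/table1.json, tmp/table2.json, ...
resource.write("tmp/table{number}.json", scheme="multipart", control=control)
```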
234,912 | 07.12.2020 09:52:01 | -3,600 | f2d4dc20bbc2641862ec015b6fd4a02a5dee0576 | Improved error message for bad JSON tabular data | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/json.py",
"new_path": "frictionless/plugins/json.py",
"diff": "@@ -164,8 +164,8 @@ class JsonParser(Parser):\ntry:\nyield next(parser.data_stream)\nexcept StopIteration:\n- error = errors.SourceError(note=\"cannot extract tabular data from JSON\")\n- raise FrictionlessException(error)\n+ note = f'cannot extract JSON tabular data from \"{self.resource.source}\"'\n+ raise FrictionlessException(errors.SourceError(note=note))\nif parser.resource.dialect.keyed:\ndialect[\"keyed\"] = True\nyield from parser.data_stream\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improved error message for bad JSON tabular data (#585) |
234,912 | 07.01.2021 17:19:50 | -10,800 | 5bd794179dcbaab080e1e0d0e692b6c875e44d43 | Fixed schema as a string validation | [
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -1485,8 +1485,8 @@ class Resource(Metadata):\nmetadata_duplicate = True\nmetadata_Error = errors.ResourceError\nmetadata_profile = deepcopy(config.RESOURCE_PROFILE)\n- metadata_profile[\"properties\"][\"dialect\"] = {\"type\": \"object\"}\n- metadata_profile[\"properties\"][\"schema\"] = {\"type\": \"object\"}\n+ metadata_profile[\"properties\"][\"dialect\"] = {\"type\": [\"string\", \"object\"]}\n+ metadata_profile[\"properties\"][\"schema\"] = {\"type\": [\"string\", \"object\"]}\ndef metadata_process(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_package.py",
"new_path": "tests/validate/test_package.py",
"diff": "@@ -151,6 +151,13 @@ def test_validate_package_with_nopool():\n]\n+def test_validate_package_with_schema_as_string():\n+ report = validate(\n+ {\"resources\": [{\"path\": \"data/table.csv\", \"schema\": \"data/schema.json\"}]}\n+ )\n+ assert report.valid\n+\n+\n# Checksum\nDESCRIPTOR_SH = {\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_resource.py",
"new_path": "tests/validate/test_resource.py",
"diff": "@@ -53,6 +53,11 @@ def test_validate_invalid_table():\n]\n+def test_validate_resource_with_schema_as_string():\n+ report = validate({\"path\": \"data/table.csv\", \"schema\": \"data/schema.json\"})\n+ assert report.valid\n+\n+\n# Integrity\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed schema as a string validation (#611) |
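The added tests double as usage: a descriptor may now point at a schema file by path:

```python
from frictionless import validate

report = validate({"path": "data/table.csv", "schema": "data/schema.json"})
print(report.valid)  # True
```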
234,887 | 18.01.2021 10:17:42 | 0 | 77fb6394be7110aa73de02ed573148996bdd0e1a | Corrections to sql docs | [
{
"change_type": "MODIFY",
"old_path": "docs/working-with-sql.md",
"new_path": "docs/working-with-sql.md",
"diff": "@@ -15,7 +15,7 @@ You can read SQL database:\n```py\nfrom frictionless import Package\n-package = Package.from_pandas(url='postgresql://mydatabase')\n+package = Package.from_sql(url='postgresql://mydatabase')\nprint(package)\nfor resource in package.resources:\nprint(resource.read_rows())\n@@ -31,7 +31,7 @@ You can write SQL databases:\nfrom frictionless import Package\npackage = Package('path/to/datapackage.json')\n-package.to_spss(utl='postgresql://mydatabase')\n+package.to_sql(utl='postgresql://mydatabase')\n```\n## Configuring SQL\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Corrections to sql docs (#617) |
234,900 | 25.01.2021 04:19:35 | 10,800 | e6b0e8b6527f046aa325a355ec1386110962a813 | Explicit the relationship in introduction guide
This is a suggestion. The first time I read, it was not clear that neighbor_id was a self reference to id. | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/introduction-guide.md",
"new_path": "docs/guides/introduction-guide.md",
"diff": "@@ -18,7 +18,7 @@ $ cat data/countries.csv\n5\n-As we can see, it's a data containing information about European countries and their populations. Also, it's easy to notice that there are two fields having a relationship based on a country's identifier.\n+As we can see, it's a data containing information about European countries and their populations. Also, it's easy to notice that there are two fields having a relationship based on a country's identifier: neighbor_id is a Foreign Key to id.\n## Describing Data\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Explicit the relationship in introduction guide (#623)
This is a suggestion. The first time I read, it was not clear that neighbor_id was a self reference to id. |
234,900 | 25.01.2021 04:21:31 | 10,800 | 5bf224fbd5d58fef18f2b19015fc7e293356ca67 | Fixed snippet in introduction guide
In the version 3.48 the method read_row_stream() does not exist. read_rows works though. | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/introduction-guide.md",
"new_path": "docs/guides/introduction-guide.md",
"diff": "@@ -266,7 +266,7 @@ from frictionless import Resource, Table\ndef source():\nresource = Resource(\"tmp/countries.resource.yaml\", basepath='.')\n- for row in resource.read_row_stream():\n+ for row in resource.read_rows():\nif row[\"name\"] == \"France\":\nrow[\"population\"] = 67\nif row[\"name\"] == \"Germany\":\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed snippet in introduction guide (#622)
In the version 3.48 the method read_row_stream() does not exist. read_rows works though. |
234,907 | 26.01.2021 12:44:25 | 0 | 39bacccecd6478b162cf2024f7284721e752b10f | Update introduction-guide.md
'To tell you more' didn't quite seem idiomatic to my ears. I've introduced a minor change to smooth it out. | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/introduction-guide.md",
"new_path": "docs/guides/introduction-guide.md",
"diff": "title: Introduction Guide\n---\n-Let's say we have a few raw data files. It's been just collected by the data researchers, and the quality of data is not yet perfect. To tell you more, they haven't even removed the comments from the first row!\n+Let's say we have a few raw data files. It's been just collected by the data researchers, and the quality of data is still far from perfect. In fact, they haven't even removed the comments from the first row!\n```bash\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update introduction-guide.md (#634)
'To tell you more' didn't quite seem idiomatic to my ears. I've introduced a minor change to smooth it out. |
234,907 | 01.02.2021 06:36:47 | 0 | f548e36ee1dfe671c2840513064c0a65d6d7e02d | Fixed API references overview
For some reason a bunch of 'c's had dropped out of the instances of the word 'Reference' here. I've added them back in. | [
{
"change_type": "MODIFY",
"old_path": "docs/references/references-overview.md",
"new_path": "docs/references/references-overview.md",
"diff": "@@ -8,23 +8,23 @@ In this section you can find detailed references for:\nList of options for schemes and formats:\n-- [Schemes Referene](schemes-reference.md)\n+- [Schemes Reference](schemes-reference.md)\n- [Formats Reference](formats-reference.md)\n## Plugins\nList of Frictionless Framework plugins and their status:\n-- [Plugins Referene](plugins-reference.md)\n+- [Plugins Reference](plugins-reference.md)\n## Errors\nList of Frictionless Framework errors\n-- [Errors Referene](errors-reference.md)\n+- [Errors Reference](errors-reference.md)\n## API\nFull API reference\n-- [API Referene](api-reference.md)\n+- [API Reference](api-reference.md)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed API references overview (#640)
For some reason a bunch of 'c's had dropped out of the instances of the word 'Reference' here. I've added them back in. |
234,912 | 10.02.2021 09:35:05 | -10,800 | 1635922f2dafebf8d6002949783cc2b0804a2848 | Fixed encoding detection | [
{
"change_type": "MODIFY",
"old_path": "frictionless/detector.py",
"new_path": "frictionless/detector.py",
"diff": "+import codecs\nimport chardet\nfrom copy import copy, deepcopy\nfrom .exception import FrictionlessException\n@@ -98,7 +99,7 @@ class Detector:\n# Detect\n- def detect_encoding(self, buffer):\n+ def detect_encoding(self, buffer, *, encoding=None):\n\"\"\"Detect encoding from buffer\nParameters:\n@@ -107,8 +108,13 @@ class Detector:\nReturns:\nstr: encoding\n\"\"\"\n+\n+ # Use defined\nif self.__encoding_function:\nreturn self.__encoding_function(buffer)\n+\n+ # Detect encoding\n+ if not encoding:\nresult = chardet.detect(buffer)\nencoding = result[\"encoding\"] or config.DEFAULT_ENCODING\nconfidence = result[\"confidence\"] or 0\n@@ -116,6 +122,23 @@ class Detector:\nencoding = config.DEFAULT_ENCODING\nif encoding == \"ascii\":\nencoding = config.DEFAULT_ENCODING\n+ if encoding is None:\n+ encoding = self.resource.detector.detect_encoding(buffer)\n+\n+ # Normalize encoding\n+ encoding = codecs.lookup(encoding).name\n+ # Work around for incorrect inferion of utf-8-sig encoding\n+ if encoding == \"utf-8\":\n+ if buffer.startswith(codecs.BOM_UTF8):\n+ encoding = \"utf-8-sig\"\n+ # Use the BOM stripping name (without byte-order) for UTF-16 encodings\n+ elif encoding == \"utf-16-be\":\n+ if buffer.startswith(codecs.BOM_UTF16_BE):\n+ encoding = \"utf-16\"\n+ elif encoding == \"utf-16-le\":\n+ if buffer.startswith(codecs.BOM_UTF16_LE):\n+ encoding = \"utf-16\"\n+\nreturn encoding\ndef detect_layout(self, sample, *, layout=None):\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/loader.py",
"new_path": "frictionless/loader.py",
"diff": "import io\nimport os\nimport gzip\n-import codecs\nimport shutil\nimport atexit\nimport hashlib\n@@ -83,6 +82,8 @@ class Loader:\nReturns:\nio.TextStream: resource text stream\n\"\"\"\n+ if not self.__text_stream:\n+ self.__text_stream = self.read_text_stream()\nreturn self.__text_stream\n# Open/Close\n@@ -92,7 +93,6 @@ class Loader:\nself.close()\ntry:\nself.__byte_stream = self.read_byte_stream()\n- self.__text_stream = self.read_text_stream()\nreturn self\nexcept Exception:\nself.close()\n@@ -123,14 +123,20 @@ class Loader:\n\"\"\"\ntry:\nbyte_stream = self.read_byte_stream_create()\n- byte_stream = self.read_byte_stream_infer_stats(byte_stream)\n+ byte_stream = self.read_byte_stream_process(byte_stream)\nbyte_stream = self.read_byte_stream_decompress(byte_stream)\n- except IOError as exception:\n- error = errors.SchemeError(note=str(exception))\n- raise FrictionlessException(error)\n+ buffer = self.read_byte_stream_buffer(byte_stream)\n+ self.read_byte_stream_analyze(buffer)\n+ self.__buffer = buffer\n+ except (LookupError, UnicodeDecodeError) as exception:\n+ error = errors.EncodingError(note=str(exception))\n+ raise FrictionlessException(error) from exception\nexcept config.COMPRESSION_EXCEPTIONS as exception:\nerror = errors.CompressionError(note=str(exception))\nraise FrictionlessException(error)\n+ except IOError as exception:\n+ error = errors.SchemeError(note=str(exception))\n+ raise FrictionlessException(error)\nreturn byte_stream\ndef read_byte_stream_create(self):\n@@ -141,8 +147,8 @@ class Loader:\n\"\"\"\nraise NotImplementedError()\n- def read_byte_stream_infer_stats(self, byte_stream):\n- \"\"\"Infer byte stream stats\n+ def read_byte_stream_process(self, byte_stream):\n+ \"\"\"Process byte stream\nParameters:\nbyte_stream (io.ByteStream): resource byte stream\n@@ -150,8 +156,6 @@ class Loader:\nReturns:\nio.ByteStream: resource byte stream\n\"\"\"\n- if not self.resource.get(\"stats\"):\n- return byte_stream\nreturn ByteStreamWithStatsHandling(byte_stream, resource=self.resource)\ndef read_byte_stream_decompress(self, byte_stream):\n@@ -209,59 +213,40 @@ class Loader:\nnote = f'compression \"{self.resource.compression}\" is not supported'\nraise FrictionlessException(errors.CompressionError(note=note))\n- def read_text_stream(self):\n- \"\"\"Read text stream\n+ def read_byte_stream_buffer(self, byte_stream):\n+ \"\"\"Buffer byte stream\n+\n+ Parameters:\n+ byte_stream (io.ByteStream): resource byte stream\nReturns:\n- io.TextStream: resource text stream\n+ bytes: buffer\n\"\"\"\n- try:\n- self.read_text_stream_infer_encoding(self.byte_stream)\n- except (LookupError, UnicodeDecodeError) as exception:\n- error = errors.EncodingError(note=str(exception))\n- raise FrictionlessException(error) from exception\n- return self.read_text_stream_decode(self.byte_stream)\n+ buffer = byte_stream.read(self.resource.detector.buffer_size)\n+ buffer = buffer[: self.resource.detector.buffer_size]\n+ byte_stream.seek(0)\n+ return buffer\n- def read_text_stream_infer_encoding(self, byte_stream):\n- \"\"\"Infer text stream encoding\n+ def read_byte_stream_analyze(self, buffer):\n+ \"\"\"Detect metadta using sample\nParameters:\n- byte_stream (io.ByteStream): resource byte stream\n+ buffer (bytes): byte buffer\n\"\"\"\n# We don't need a default encoding\nencoding = self.resource.get(\"encoding\")\n- buffer = byte_stream.read(self.resource.detector.buffer_size)\n- buffer = buffer[: self.resource.detector.buffer_size]\n- byte_stream.seek(0)\n- if encoding is None:\n- encoding = 
self.resource.detector.detect_encoding(buffer)\n- encoding = codecs.lookup(encoding).name\n- # Work around for incorrect inferion of utf-8-sig encoding\n- if encoding == \"utf-8\":\n- if buffer.startswith(codecs.BOM_UTF8):\n- encoding = \"utf-8-sig\"\n- # Use the BOM stripping name (without byte-order) for UTF-16 encodings\n- elif encoding == \"utf-16-be\":\n- if buffer.startswith(codecs.BOM_UTF16_BE):\n- encoding = \"utf-16\"\n- elif encoding == \"utf-16-le\":\n- if buffer.startswith(codecs.BOM_UTF16_LE):\n- encoding = \"utf-16\"\n+ encoding = self.resource.detector.detect_encoding(buffer, encoding=encoding)\nself.resource.encoding = encoding\n- self.__buffer = buffer\n- def read_text_stream_decode(self, byte_stream):\n- \"\"\"Decode text stream\n-\n- Parameters:\n- byte_stream (io.ByteStream): resource byte stream\n+ def read_text_stream(self):\n+ \"\"\"Read text stream\nReturns:\n- text_stream (io.TextStream): resource text stream\n+ io.TextStream: resource text stream\n\"\"\"\n# NOTE: this solution might be improved using parser properties\nnewline = \"\" if self.resource.format == \"csv\" else None\n- return io.TextIOWrapper(byte_stream, self.resource.encoding, newline=newline)\n+ return io.TextIOWrapper(self.byte_stream, self.resource.encoding, newline=newline)\n# Write\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -715,7 +715,6 @@ class Resource(Metadata):\nself[\"scheme\"] = self.scheme\nself[\"format\"] = self.format\nself[\"hashing\"] = self.hashing\n- self[\"encoding\"] = self.encoding\nif self.innerpath:\nself[\"innerpath\"] = self.innerpath\nif self.compression:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/plugins/test_remote.py",
"new_path": "tests/plugins/test_remote.py",
"diff": "@@ -18,7 +18,6 @@ def test_remote_loader():\n]\[email protected](reason=\"encoding\")\[email protected]\ndef test_remote_loader_latin1():\n# Github returns wrong encoding `utf-8`\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_package.py",
"new_path": "tests/test_package.py",
"diff": "@@ -697,7 +697,6 @@ def test_package_infer_multiple_paths():\nassert package.resources[1].path == \"data2.csv\"\[email protected](reason=\"encoding\")\ndef test_package_infer_non_utf8_file():\npackage = Package(\"data/table-with-accents.csv\")\npackage.infer()\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed encoding detection (#672) |
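A sketch of the reworked detection, assuming `Detector` is constructible with defaults and imported from the module path shown in the diff; the sample bytes are made up:

```python
from frictionless.detector import Detector

detector = Detector()
# A UTF-8 BOM is now normalized to "utf-8-sig" rather than plain "utf-8".
buffer = b"\xef\xbb\xbfid,name\n1,english\n"
print(detector.detect_encoding(buffer))  # 'utf-8-sig'
```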
234,912 | 10.02.2021 09:42:38 | -10,800 | f47d9c7c821aa0f85f84333c36e56d50a8f0c697 | Fixed resource.read_bytes | [
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -632,11 +632,10 @@ class Resource(Metadata):\ngen<bytes>?: byte stream\n\"\"\"\nif not self.closed:\n- loader = self.__loader\n- if not loader:\n- loader = system.create_loader(self)\n- loader.open()\n- return loader.byte_stream\n+ if not self.__loader:\n+ self.__loader = system.create_loader(self)\n+ self.__loader.open()\n+ return self.__loader.byte_stream\n@property\ndef text_stream(self):\n@@ -646,11 +645,10 @@ class Resource(Metadata):\ngen<str[]>?: text stream\n\"\"\"\nif not self.closed:\n- loader = self.__loader\n- if not loader:\n- loader = system.create_loader(self)\n- loader.open()\n- return loader.text_stream\n+ if not self.__loader:\n+ self.__loader = system.create_loader(self)\n+ self.__loader.open()\n+ return self.__loader.text_stream\n@property\ndef list_stream(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_resource.py",
"new_path": "tests/test_resource.py",
"diff": "@@ -102,7 +102,6 @@ def test_resource_from_path_remote_error_bad_path():\nassert error.note.count(\"bad.json\")\[email protected](reason=\"ValueError: read length must be positive (Python 3.6)\")\[email protected](helpers.is_platform(\"windows\"), reason=\"It doesn't work for Windows\")\ndef test_resource_source_non_tabular():\npath = \"data/text.txt\"\n@@ -148,7 +147,6 @@ def test_resource_source_non_tabular_error_bad_path():\nassert error.note.count(\"data/bad.txt\")\[email protected]\[email protected](helpers.is_platform(\"windows\"), reason=\"It doesn't work for Windows\")\ndef test_resource_source_path():\npath = \"data/table.csv\"\n@@ -2363,7 +2361,6 @@ def test_resource_reopen_generator():\n# Read\[email protected](reason=\"ValueError: read length must be positive (Python 3.6)\")\[email protected](helpers.is_platform(\"windows\"), reason=\"It doesn't work for Windows\")\ndef test_resource_read_bytes():\nresource = Resource(path=\"data/text.txt\")\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed resource.read_bytes (#673) |
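With the loader now cached on the resource, byte access works the way the re-enabled test expects (path taken from that test):

```python
from frictionless import Resource

resource = Resource(path="data/text.txt")
print(resource.read_bytes())  # full file contents as bytes
```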
234,912 | 10.02.2021 18:41:22 | -10,800 | 7e8268a371fd70846281346f3d09c13ac877b2fe | Fixed steps.diff/join tests | [
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/table.py",
"new_path": "frictionless/steps/table.py",
"diff": "@@ -376,9 +376,8 @@ class table_normalize(Step):\ndef transform_resource(self, resource):\nwith resource:\n- for number, row in enumerate(resource.row_stream, start=1):\n- if number == 1:\n- yield row.field_names\n+ yield resource.header.to_list()\n+ for row in resource.row_stream:\nyield row.to_list()\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/steps/test_table.py",
"new_path": "tests/steps/test_table.py",
"diff": "@@ -114,21 +114,19 @@ def test_step_table_diff():\n]\[email protected](reason=\"steps.diff/join\")\ndef test_step_table_diff_with_ignore_order():\nsource = Resource(path=\"data/transform.csv\")\nsource.infer()\ntarget = transform(\nsource,\nsteps=[\n- steps.table_normalize(),\nsteps.table_diff(\nresource=Resource(\ndata=[\n[\"name\", \"id\", \"population\"],\n- [\"germany\", 1, 83],\n- [\"france\", 2, 50],\n- [\"spain\", 3, 47],\n+ [\"germany\", \"1\", \"83\"],\n+ [\"france\", \"2\", \"50\"],\n+ [\"spain\", \"3\", \"47\"],\n]\n),\nignore_order=True,\n@@ -393,15 +391,13 @@ def test_step_table_join_mode_cross():\n]\[email protected](reason=\"steps.diff/join\")\ndef test_step_table_join_mode_negate():\nsource = Resource(path=\"data/transform.csv\")\ntarget = transform(\nsource,\nsteps=[\n- steps.table_normalize(),\nsteps.table_join(\n- resource=Resource(data=[[\"id\", \"note\"], [1, \"beer\"], [4, \"rum\"]]),\n+ resource=Resource(data=[[\"id\", \"note\"], [\"1\", \"beer\"], [\"4\", \"rum\"]]),\nmode=\"negate\",\n),\n],\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed steps.diff/join tests (#678) |
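The reworked `table_normalize` yields the header first and then each typed row as a list; a sketch using the transform entry point from the tests:

```python
from frictionless import Resource, transform, steps

source = Resource(path="data/transform.csv")
# Normalization writes cells back through the schema, which is why the
# updated tests can drop it and pass string cells to table_diff/table_join.
target = transform(source, steps=[steps.table_normalize()])
print(target.read_rows())
```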
234,917 | 10.02.2021 23:42:52 | 21,600 | 597b5d739d02843b674171905142697189a0cc2a | Improved text of describe doc | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -17,25 +17,25 @@ In other words, \"describing data\" means creating metadata for your data files. T\n$ pip install frictionless\n```\n-For a dataset, there is even more information that can be provided like general dataset purpose, information about data sources, list of authors, and many more. Of course, when there are many tabular files, relational rules can be very important. Usually, there are foreign keys ensuring the integrity of the dataset; for example, there is some reference table containing country names and other tables using it as a reference. Data in this form is called \"normalized data\" and it occurs very often in scientific and another kind of research.\n+For a dataset, there is even more information that can be provided, like the general purpose of a dataset, information about data sources, list of authors, and more. Of course, when there are many tabular files, relational rules can be very important. Usually, there are foreign keys ensuring the integrity of the dataset; for example, think of a reference table containing country names and other tables using it as a reference. Data in this form is called \"normalized data\" and it occurs very often in scientific and other kinds of research.\n-Having a general understanding of what is \"data describing\", we can now articulate why it's important:\n-- **data validation**; metadata helps to reveal problems in your data on the early stages of your workflow\n-- **data publication**; metadata provides additional information that your data can't include\n+Now that we have a general understanding of what \"describing data\" is, we can now articulate why it's important:\n+- **data validation**: metadata helps to reveal problems in your data during early stages of your workflow\n+- **data publication**: metadata provides additional information that your data doesn't include\n-There are not the only two pros of having metadata but they are two the most important. Please continue reading to learn how Frictionless helps to achieve these advantages describing your data.\n+These are not the only positives of having metadata, but they are two of the most important. Please continue reading to learn how Frictionless helps to achieve these advantages by describing your data.\n## Describe Functions\n-The `describe` functions are the main tool for data describing. In many cases, this high-level interface is enough for data exploration and other needs.\n+The `describe` functions are the main Frictionless tool for describing data. 
In many cases, this high-level interface is enough for data exploration and other needs.\nThe frictionless framework provides 4 different `describe` functions in Python:\n-- `describe`: it will detect the source type and return Data Resource or Data Package metadata\n-- `describe_schema`: it will always return Table Schema metadata\n-- `describe_resource`: it will always return Data Resource metadata\n-- `describe_package`: it will always return Data Package metadata\n+- `describe`: detects the source type and returns Data Resource or Data Package metadata\n+- `describe_schema`: always returns Table Schema metadata\n+- `describe_resource`: always returns Data Resource metadata\n+- `describe_package`: always returns Data Package metadata\n-In command-line, there is only 1 command but there is a flag to adjust the behavior:\n+In the command-line, there is only 1 command (`describe`) but there is also a flag to adjust the behavior:\n```bash\n$ frictionless describe\n@@ -75,9 +75,9 @@ resources:\n## Describing Schema\n-Table Schema is a specification for providing a \"schema\" (similar to a database schema) for tabular data. This information includes the expected type of each value in a column (\"string\", \"number\", \"date\", etc.), constraints on the value (\"this string can only be at most 10 characters long\"), and the expected format of the data (\"this field should only contain strings that look like email addresses\"). Table Schema can also specify relations between tables.\n+Table Schema is a specification for providing a \"schema\" (similar to a database schema) for tabular data. This information includes the expected data type for each value in a column (\"string\", \"number\", \"date\", etc.), constraints on the value (\"this string can only be at most 10 characters long\"), and the expected format of the data (\"this field should only contain strings that look like email addresses\"). Table Schema can also specify relations between tables.\n-We're going to use this file for this section examples. For this guide, we use solely CSV files because of their demonstrativeness but in-general Frictionless can handle Excel, JSON, SQL, and many other formats:\n+We're going to use this file for the examples in this section. For this guide, we only use CSV files because of their demonstrativeness, but in-general Frictionless can handle data in Excel, JSON, SQL, and many other formats:\n```bash\n$ cat data/country-1.csv\n@@ -92,7 +92,7 @@ id,neighbor_id,name,population\n5,4,Spain,47\n```\n-Let's get Table Schema using Frictionless framework:\n+Let's get a Table Schema using the Frictionless framework:\n```python\nfrom frictionless import describe_schema\n@@ -101,7 +101,7 @@ schema = describe_schema(\"data/country-1.csv\")\nschema.to_yaml(\"tmp/country.schema-simple.yaml\")\n```\n-The high-level functions of Frictionless operate on dataset and resource levels so we have to use Python a little of Python programming to get schema information. Below we will show how to use a command-line interface for similar tasks.\n+The high-level functions of Frictionless operate on the dataset and resource levels so we have to use a little bit of Python programming to get the schema information. 
Below we will show how to use a command-line interface for similar tasks.\n```bash\n$ cat tmp/country.schema-simple.yaml\n@@ -119,7 +119,7 @@ fields:\ntype: integer\n```\n-As we can see, we were able to get infer basic metadata of our data file but describing data doesn't end here, we can provide additional information we discussed earlier:\n+As we can see, we were able to infer basic metadata from our data file. But describing data doesn't end here - we can provide additional information that we discussed earlier:\n```python\nfrom frictionless import describe_schema\n@@ -141,7 +141,7 @@ Let's break it down:\n- we added a title for all the fields\n- we added a description to the \"Population\" field; the year information can be critical to interpret the data\n- we set a constraint to the \"Population\" field because it can't be less than 0\n-- we added a foreign key saying that \"Identifier of the neighbor\" should present in the \"Identifier\" field\n+- we added a foreign key saying that \"Identifier of the neighbor\" should be present in the \"Identifier\" field\n```bash\n@@ -186,7 +186,7 @@ The Data Resource format describes a data resource such as an individual file or\nThe essence of a Data Resource is a locator for the data it describes.\nA range of other properties can be declared to provide a richer set of metadata.\n-For this section, we will use the file that is slightly more complex to handle. For some reason, cells are separated by the \";\" char and there is a comment on the top:\n+For this section, we will use a file that is slightly more complex to handle. For some reason, cells are separated by the \";\" char and there is a comment on the top:\n```bash\n@@ -233,7 +233,7 @@ schema:\ntype: string\n```\n-OK, that's clearly wrong. As we have seen in the \"Introductory Guide\" Frictionless is capable of inferring some complicated cases' metadata but our table is too weird for it. We need to program it:\n+OK, that's clearly wrong. As we have seen in the \"Introductory Guide\" Frictionless is capable of inferring some complicated cases' metadata but our table is too unusual for it to automatically infer. We need to manually program it:\n```python\nfrom frictionless import Schema, describe\n@@ -245,9 +245,9 @@ resource.schema = Schema(\"tmp/country.schema.yaml\")\nresource.to_yaml(\"tmp/country.resource.yaml\")\n```\n-So what we are doing here:\n-- we set header rows to be row number 2; as humans, we can easily see it\n-- we set CSV Delimiter to be \";\"; this file in not really usual CSV for some reason\n+So what we did here:\n+- we set the header rows to be row number 2; as humans, we can easily see that was the proper row\n+- we set the CSV Delimiter to be \";\"\n- we reuse the schema we created earlier as the data has the same structure and meaning\n```bash\n@@ -293,18 +293,18 @@ schema:\nresource: ''\n```\n-Our resource metadata includes the schema metadata we created earlier but also it has:\n+Our resource metadata includes the schema metadata we created earlier, but it also has:\n- general information about the file's schema, format, and compression\n-- information about CSV Dialect helping software understand how to read it\n-- checksum information as though hash, bytes, and rows\n+- information about CSV Dialect which helps software understand how to read it\n+- checksum information like hash, bytes, and rows\n-But the most important difference is that resource metadata contains the `path` property. 
It conceptually distinct Data Resource specification from Table Schema specification because while a Table Schema descriptor can describe a class of data files, a Data Resource descriptor describes the only one exact data file, `data/country-2.csv` in our case.\n+But the most important difference is that the resource metadata contains the `path` property. This is a conceptual distinction of the Data Resource specification compared to the Table Schema specification. While a Table Schema descriptor can describe a class of data files, a Data Resource descriptor describes only one exact data file, `data/country-2.csv` in our case.\nUsing programming terminology we could say that:\n- Table Schema descriptor is abstract (for a class of files)\n- Data Resource descriptor is concrete (for an individual file)\n-We will show the practical difference in the \"Using Metadata\" section but in the next section, we will overview the Data Package specification.\n+We will show the practical difference in the \"Using Metadata\" section, but in the next section, we will overview the Data Package specification.\nTo continue learning about data resources please read:\n- [Data Resource Spec](https://specs.frictionlessdata.io/data-resource/)\n@@ -397,9 +397,9 @@ resources:\nscheme: file\n```\n-We have already learned about many concepts that are reflected in this metadata. We can see resources, schemas, fields, and other familiar entities. The difference is that this descriptor has information about multiple files which is the most popular way of sharing data - in datasets. Very often you have not only one data file but also additional data files, some textual documents e.g. PDF, and others. To package all of these files with the corresponding metadata we use data packages.\n+We have already learned about many concepts that are reflected in this metadata. We can see resources, schemas, fields, and other familiar entities. The difference is that this descriptor has information about multiple files which is a popular way of sharing data - in datasets. Very often you have not only one data file but also additional data files, some textual documents e.g. PDF, and others. To package all of these files with the corresponding metadata we use data packages.\n-Following the already familiar to the guide reader pattern, we add some additional metadata:\n+Following the pattern that is already familiar to the guide reader, we add some additional metadata:\n```python\nfrom frictionless import describe\n@@ -415,7 +415,7 @@ package.get_resource(\"country\").schema.foreign_keys.append(\npackage.to_yaml(\"tmp/country.package.yaml\")\n```\n-In this case, we add a relation between different files connecting `id` and `capital_id`. Also, we provide dataset-level metadata to share with the purpose of this dataset. We haven't added individual fields' titles and description but it can be done as it was shown in the \"Table Schema\" section.\n+In this case, we add a relation between different files connecting `id` and `capital_id`. Also, we provide dataset-level metadata to explain the purpose of this dataset. We haven't added individual fields' titles and descriptions, but that can be done as it was shown in the \"Table Schema\" section.\n```bash\n$ cat tmp/country.package.yaml\n@@ -465,17 +465,17 @@ resources:\nresource: capital\n```\n-The main role of the Data Package descriptor is describing a dataset; as we can see, it includes previously shown descriptors as though `schema`, `dialect`, and `resource`. 
But it's a mistake to think then that Data Package is the least important specification; actually, it completes the Frictionless Data suite making possible sharing and validating not only individual files but complete datasets.\n+The main role of the Data Package descriptor is describing a dataset; as we can see, it includes previously shown descriptors like `schema`, `dialect`, and `resource`. But it would be a mistake to think that Data Package is the least important specification; actually, it completes the Frictionless Data suite making it possible to share and validate not only individual files but also complete datasets.\nTo continue learning about data resources please read:\n- [Data Package Spec](https://specs.frictionlessdata.io/data-package/)\n- [API Reference: Package](../references/api-reference.md#package)\n-## Metadata Purpose\n+## Metadata Importance\n-This documentation contains a great deal of information on how to use metadata and why it's vital for your data. In this article, we're going to provide a quick example based on the \"Data Resource\" section but please read other documents to get the full picture.\n+This documentation contains a great deal of information on how to use metadata and why it's vital for your data. In this section, we're going to provide a quick example based on the \"Data Resource\" section but please read other documents to get the full picture.\n-Let's get back to this exotic data table:\n+Let's get back to this unusual data table:\n```bash\n@@ -539,7 +539,7 @@ id;neighbor_id;name;population\n==============================\n```\n-Basically, that's a really important idea - with not metadata many software will not be able to even read this data file, furthermore, without metadata people can not understand the purpose of this data. Let's now use the `country.resource.yaml` the file we created in the \"Data Resource\" section:\n+This example highlights a really important idea - without metadata many software will not be able to even read this data file. Furthermore, without metadata people cannot understand the purpose of this data. To see how we can use metadata to fix our data, let's now use the `country.resource.yaml` file we created in the \"Data Resource\" section with Frictionless `extract`:\n```bash\n$ frictionless extract tmp/country.resource.yaml --basepath .\n@@ -561,11 +561,11 @@ id neighbor_id name population\n== =========== ======= ==========\n```\n-As we can see, it's now fixed. The metadata we'd had saved the day. If we explore this data in Python we can discover that it also correct data types e.g. `id` is Python's integer not string. This fact will allow exporting and sharing this data without any fear.\n+As we can see, the data is now fixed. The metadata we had saved the day! If we explore this data in Python we can discover that it also corrected data types - e.g. `id` is Python's integer not string. We can now export and share this data without any worries.\n## Inferring Metadata\n-Many Frictionless functions infer metadata under the hood as though `describe`, `extract`, and many more. On a lower-level, it's possible to control this process. Let's create a `Resource`.\n+Many Frictionless functions infer metadata under the hood such as `describe`, `extract`, and many more. On a lower-level, it's possible to control this process. 
To see this, let's create a `Resource`.\n```python\nfrom pprint import pprint\n@@ -633,7 +633,7 @@ pprint(resource.schema)\n{'name': 'population', 'type': 'integer'}]}\n```\n-Under the hood it, for example, still treats empty string as missing values because it's the specs' default. We can make reveal implicit metadata by expanding it:\n+Under the hood it, for example, still treats empty strings as missing values because it's the specs' default. We can reveal implicit metadata by expanding it:\n```python\nresource.schema.expand()\n@@ -659,7 +659,7 @@ pprint(resource.schema)\n## Transforming Metadata\n-We have seen it before but let's re-iterate; it's possible to transform core metadata properties using Python interface:\n+We have seen this before but let's re-iterate; it's possible to transform core metadata properties using Python's interface:\n```python\nfrom frictionless import Resource\n@@ -672,7 +672,7 @@ resource.layout.header_rows = [2]\nresource.to_yaml(\"tmp/country.resource.yaml\")\n```\n-But not only the Python interface is available. Thanks to the flexibility of the Frictionless Specs, we can add arbitrary metadata to our descriptor. We use dictionary operations for it:\n+But the Python interface is not our only option. Thanks to the flexibility of the Frictionless Specs, we can add arbitrary metadata to our descriptor. We use dictionary operations to do this:\n```python\nfrom frictionless import Resource\n@@ -734,7 +734,7 @@ schema:\n## Validating Metadata\n-Metadata validity is an important topic so it's recommended to validate your metadata before publishing. For example, let's make it invalid:\n+Metadata validity is an important topic, and we recommend validating your metadata before publishing. For example, let's first make it invalid:\n```python\nfrom frictionless import Resource\n@@ -750,7 +750,7 @@ False\n[{'code': 'resource-error', 'name': 'Resource Error', 'tags': ['#general'], 'note': '\"1 is not of type \\'string\\'\" at \"title\" in metadata and at \"properties/title/type\" in profile', 'message': 'The data resource has an error: \"1 is not of type \\'string\\'\" at \"title\" in metadata and at \"properties/title/type\" in profile', 'description': 'A validation cannot be processed.'}]\n```\n-Let's fix our resource metadata:\n+We see this error: `'message': 'The data resource has an error: \"1 is not of type \\'string\\'\" at \"title\" in metadata and at \"properties/title/type\" in profile'` Now, let's fix our resource metadata:\n```python\nfrom frictionless import Resource\n@@ -764,11 +764,11 @@ print(resource.metadata_valid)\nTrue\n```\n-You need to check `metadata.metadata_valid` only if you change it by hands; the available high-level functions like `validate` do it on their own.\n+You need to check `metadata.metadata_valid` only if you change it manually; Frictionless' high-level functions like `validate` do that on their own.\n## Mastering Metadata\n-Metadata class is under the hood of many of Frictionless' classes. Let's overview main `Metadata` features. For a full reference, please read \"API Reference\". Let's take a look at the Metadata class which is a `dict` subclass:\n+The Metadata class is under the hood of many of Frictionless' classes. Let's overview the main `Metadata` features. For a full reference, please read [\"API Reference\"](../references/api-reference.md). 
Let's take a look at the Metadata class which is a `dict` subclass:\n```\nMetadata(dict)\n@@ -784,18 +784,18 @@ Metadata(dict)\nmetadata_validate\n```\n-This class exists for subclassing and here is important points that will help to work with metadata objects and design and write new metadata classes:\n-- to bind default values to a property it's possible to use `metadata_attach` (see e.g. the `Schema` class)\n+This class exists for subclassing, and here are some important points that will help you work with metadata objects and design and write new metadata classes:\n+- to bind default values to a property it is possible to use `metadata_attach` (see e.g. the `Schema` class)\n- during the initialization a descriptor is processed by `metadata_extract`\n- metadata detect any shallow update and call `metadata_process`\n- checking for validity or errors will trigger `metadata_validate`\n-- functions exporting to json and yaml are available be default\n+- functions exporting to json and yaml are available by default\n- `metadata_profile` can be set to a JSON Schema\n- `metadata_Error` can be set to an Error class\n## Metadata Classes\n-Frictionless has many classes that is derived from the `Metadata` class. It means that all of them can be treated as a metadata object with getters and setters, `to_json` and `to_yaml` function, and other Metadata's API. See \"API Reference\" for more information about these classes:\n+Frictionless has many classes that are derived from the `Metadata` class. This means that all of them can be treated as a metadata objects with getters and setters, `to_json` and `to_yaml` function, and other Metadata's API. See [\"API Reference\"](../references/api-reference.md) for more information about these classes:\n- Package\n- Resource\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improved text of describe doc (#679) |
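For orientation, the metadata flow this commit's revised prose walks through (transforming metadata, adding custom keys, validating) can be condensed into one runnable sketch. This is only an illustration assembled from the diff's own snippets; the sample path comes from the guide and is assumed to exist:

```python
from frictionless import Resource

# Describe a file, attach an arbitrary custom key (the specs allow
# unknown properties), then deliberately break a core property.
resource = Resource("data/country-1.csv")
resource.infer()
resource["customKey1"] = "Value1"  # arbitrary metadata is fine
resource["title"] = 1              # invalid: title must be a string
print(resource.metadata_valid)     # False
print(resource.metadata_errors)    # points at "title" in the profile
```

As the revised text notes, this manual check is only needed for hand-edited metadata; high-level functions such as `validate` re-check validity on their own.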
234,912 | 11.02.2021 13:04:56 | -10,800 | b6133c6ff677530ceab686ee7488dfd9aae45477 | Update references-overview.md | [
{
"change_type": "MODIFY",
"old_path": "docs/references/references-overview.md",
"new_path": "docs/references/references-overview.md",
"diff": "@@ -6,13 +6,13 @@ In this section you can find detailed references for:\n## Plugins\n-List of the core Frictionless Framework plugins and their status:\n+List of the core plugins and their status:\n- [Plugins Reference](plugins-reference.md)\n-## Elements\n+## Blocks\n-List of Frictionless Framework elements:\n+List of the core building blocks:\n- [Schemes Reference](schemes-reference.md)\n- [Formats Reference](formats-reference.md)\n@@ -23,6 +23,6 @@ List of Frictionless Framework elements:\n## API\n-Full API reference\n+Full API reference:\n- [API Reference](api-reference.md)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update references-overview.md |
234,928 | 12.02.2021 22:19:54 | 0 | 8b9e6f254fe7983c42e3047064b23870b81a9eb8 | Jen's suggested edits
Correct typos.
Suggestions for rephrasing and addition of python example. | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/extracting-data.md",
"new_path": "docs/guides/extracting-data.md",
"diff": "title: Extracting Data\n---\n-Extracting data means reading tabular data from some source. We can use various customizations for this process such as providing a file format, table schema, limiting fields or rows amounts, and much more. Let's see this with some real files:\n+Extracting data means reading tabular data from a source. We can use various customizations for this process such as providing a file format, table schema, limiting field or row values, and much more. Let's see this with some real files:\n```bash\n$ cat data/country-3.csv\n@@ -77,19 +77,19 @@ The high-level interface for extracting data provided by Frictionless is a set o\n- `extract_package`: accepts a package descriptor and returns a map of the package's tables\n- `extract_resource`: accepts a resource descriptor and returns a data table\n-In the command-line, there is only 1 command (`extract`) but there is a flag to adjust the behavior:\n+On the command-line, there is only one command (`extract`) but there is a flag to adjust the behavior:\n```bash\n$ frictionless extract\n-$ frictionless extract --type package\n$ frictionless extract --type resource\n+$ frictionless extract --type package\n```\nThe `extract` functions always read data in a form of rows (see the object description below) into memory. The lower-level interfaces will allow you to stream data and various output forms.\n-## Extracting Resource\n+## Extracting a Resource\n-A resource contains only one file and for extracting a resource we can use the same approach we used above except we'll provide only one file. We will extract data using a metadata descriptor:\n+A resource contains only one file. To extract a resource we can use the same approach as above:\n```python\nfrom frictionless import extract\n@@ -106,16 +106,24 @@ pprint(rows)\nRow([('id', 5), ('name', 'Rome')])]\n```\n-In many cases, the code above doesn't really make sense as we can just provide a path to the high-level `extract` function instead of a descriptor to the `extract_resource` function but the power of the descriptor is that it can contain different metadata and be stored on the disc. Let's extend our example:\n+In many cases, the code above doesn't really make sense as we can just provide a path to the high-level `extract` function. Instead, let's use the `extract_resource` function to extract the resource from a descriptor. The power of the descriptor is that it can contain different metadata and be stored on the disc. First let's create the descriptor:\n```python\nfrom frictionless import Resource\nresource = Resource('data/capital-3.csv')\nresource.infer()\n-resource.schema.missing_values.append('3')\n+resource.schema.missing_values.append('3') # set 3 as a missing value\nresource.to_yaml('tmp/capital.resource.yaml')\n```\n+This description can then be used to extract the resource:\n+\n+```python\n+from frictionless import extract_resource\n+\n+data = extract_resource('tmp/capital.resource.yaml')\n+```\n+This can also be done on the command-line:\n```bash\n$ frictionless extract tmp/capital.resource.yaml --basepath .\n@@ -137,11 +145,11 @@ None Paris\n==== ======\n```\n-So what's happened? We set the textual representation of the number \"3\" to be a missing value. It was done only for presentational purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used.\n+So what has happened? We set the textual representation of the number \"3\" to be a missing value. 
It was done only for explanation purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used.\n-## Extracting Package\n+## Extracting a Package\n-Let's start by using the command line-interface. We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset:\n+Let's start by using the command-line interface. We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset:\n```bash\n$ frictionless extract data/*-3.csv\n@@ -177,7 +185,7 @@ id capital_id name population\n== ========== ======= ==========\n```\n-In Python we can do the same by providing a glob for the `extract` function, but instead we will use `extract_package` by providing a package descriptor:\n+In Python we can do the same:\n```python\nfrom frictionless import extract\n@@ -202,10 +210,17 @@ for path, rows in data.items():\nRow([('id', 4), ('name', 'Madrid')]),\nRow([('id', 5), ('name', 'Rome')])]\n```\n+We can also extract the package from a descriptor using the `extract_package` function:\n+\n+```python\n+package = extract_package('tmp/country.package.yaml')\n+\n+pprint(package)\n+```\n## Resource Class\n-The Resource class is also a metadata class which provides various read and stream functions. The `extract` functions always read rows into memory; Resource can do the same but it also gives a choice regarding output data. It can be `rows`, `data`, `text`, or `bytes`. Let's try reading all of them:\n+The Resource class provides metadata about a resource with read and stream functions. The `extract` functions always read rows into memory; Resource can do the same but it also gives a choice regarding output data which can be `rows`, `data`, `text`, or `bytes`. Let's try reading all of them:\n```python\nfrom frictionless import Resource\n@@ -239,7 +254,7 @@ pprint(resource.read_rows())\nRow([('id', 5), ('capital_id', 4), ('name', 'Spain'), ('population', 47)])]\n```\n-It's really handy to read all your data into memory but it's not always possible if a file is really big. For such cases, Frictionless provides streaming functions:\n+It's really handy to read all your data into memory but it's not always possible if a file is very big. For such cases, Frictionless provides streaming functions:\n```python\nfrom frictionless import Resource\n@@ -267,14 +282,13 @@ Row([('id', 5), ('capital_id', 4), ('name', 'Spain'), ('population', 47)])\n## Package Class\n-The Package class is a metadata class which provides an ability to read its contents. First of all, let's create a package descriptor:\n-\n+The Package class provides functions to read the contents of a package. First of all, let's create a package descriptor:\n```bash\n$ frictionless describe data/*-3.csv --json > tmp/country.package.json\n```\n-Now, we can open the created descriptor and read the package's resources:\n+Now, we can open the descriptor and read the package's resources:\n```python\nfrom frictionless import Package\n@@ -297,11 +311,11 @@ pprint(package.get_resource('capital-3').read_rows())\nRow([('id', 5), ('name', 'Rome')])]\n```\n-The package by itself doesn't provide any read functions directly because it's a role of its resources. So everything written below for the Resource class can be used within a package.\n+The package by itself doesn't provide any read functions directly because that is a role of its resources. 
So everything written above for the Resource class can be used within a package.\n## Header Class\n-After opening a resource you get access to a `resource.header` object. It's a list of normalized labels but also provides some additional functionality. Let's take a look:\n+After opening a resource you get access to a `resource.header` object. This is a list of normalized labels but also provides some additional functionality. Let's take a look:\n```python\n@@ -328,12 +342,11 @@ with Resource('data/capital-3.csv') as resource:\nAs List: ['id', 'name']\n-The example above covers the case when a header is valid. For a header with tabular errors this information can be much more useful revealing discrepancies, duplicates or missing cells information. Please read \"API Reference\" for more details.\n+The example above covers the case when a header is valid. For a header that contains errors in its tabular structure this information can be much more useful, revealing discrepancies, duplicates or missing cell information. Please read \"API Reference\" for more details.\n## Row Class\n-The `extract`, `resource.read_rows()` and many other functions return or yield row objects. It's a `dict` providing additional API shown below:\n-\n+The `extract`, `resource.read_rows()` and other functions return or yield row objects. In Python, this returns a dictionary with the following information:\n```python\nfrom frictionless import Resource, Detector\n@@ -372,4 +385,4 @@ As Dict: {'id': None, 'name': 'London'}\nAs List: [None, 'London']\n```\n-As we can see, it provides a lot of information which is especially useful when a row is not valid. Our row is valid but we demonstrated how it can preserve data about row missing values. It also preserves data about all errored cells. Please read \"API Reference\" for more details.\n+As we can see, this output provides a lot of information which is especially useful when a row is not valid. Our row is valid but we demonstrated how it can preserve data about missing values. It also preserves data about all cells that contain errors. Please read \"API Reference\" for more details.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Jen's suggested edits
Correct typos.
Suggestions for rephrasing and addition of python example. |
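As a companion to the snippets Jen added, the round trip can be written as one self-contained script. This is a sketch only: the sample paths are the guide's and are assumed to exist, and `basepath='.'` mirrors the CLI's `--basepath .` flag because relative paths inside a saved descriptor resolve against the descriptor's directory:

```python
from pprint import pprint
from frictionless import Resource

# Describe the file, register "3" as a missing value (purely for
# demonstration, as the guide notes), and save the descriptor.
resource = Resource('data/capital-3.csv')
resource.infer()
resource.schema.missing_values.append('3')
resource.to_yaml('tmp/capital.resource.yaml')

# Re-open through the descriptor and read the rows back;
# the id "3" now comes back as None.
resource = Resource('tmp/capital.resource.yaml', basepath='.')
pprint(resource.read_rows())
```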
234,912 | 16.02.2021 09:27:02 | -10,800 | f4756a1a9d86116d7794345b1d30a5849ed2bc58 | Removed command-line prompt from Copy | [
{
"change_type": "MODIFY",
"old_path": "docs/build.py",
"new_path": "docs/build.py",
"diff": "@@ -15,7 +15,7 @@ from frictionless import plugins, errors, checks, steps, types, helpers\ndef main():\n- build_introduction()\n+ # build_introduction()\nbuild_plugins_reference()\nbuild_schemes_reference()\nbuild_formats_reference()\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -14,7 +14,7 @@ In other words, \"describing data\" means creating metadata for your data files. T\n- and others\n```bash title=\"CLI\"\n-$ pip install frictionless\n+pip install frictionless\n```\nFor a dataset, there is even more information that can be provided, like the general purpose of a dataset, information about data sources, list of authors, and more. Of course, when there are many tabular files, relational rules can be very important. Usually, there are foreign keys ensuring the integrity of the dataset; for example, think of a reference table containing country names and other tables using it as a reference. Data in this form is called \"normalized data\" and it occurs very often in scientific and other kinds of research.\n@@ -38,17 +38,17 @@ The frictionless framework provides 4 different `describe` functions in Python:\nIn the command-line, there is only 1 command (`describe`) but there is also a flag to adjust the behavior:\n```bash title=\"CLI\"\n-$ frictionless describe\n-$ frictionless describe --type schema\n-$ frictionless describe --type resource\n-$ frictionless describe --type package\n+frictionless describe\n+frictionless describe --type schema\n+frictionless describe --type resource\n+frictionless describe --type package\n```\nFor example, if we want a Data Package descriptor for a single file:\n```bash title=\"CLI\"\n-$ frictionless describe data/table.csv --type package\n+frictionless describe data/table.csv --type package\n```\n```yaml\n---\n@@ -79,7 +79,7 @@ Table Schema is a specification for providing a \"schema\" (similar to a database\nWe're going to use this file for the examples in this section. For this guide, we only use CSV files because of their demonstrativeness, but in-general Frictionless can handle data in Excel, JSON, SQL, and many other formats:\n```bash title=\"CLI\"\n-$ cat data/country-1.csv\n+cat data/country-1.csv\n```\n```csv title=\"data/country-1.csv\"\nid,neighbor_id,name,population\n@@ -102,7 +102,7 @@ schema.to_yaml(\"tmp/country.schema-simple.yaml\")\nThe high-level functions of Frictionless operate on the dataset and resource levels so we have to use a little bit of Python programming to get the schema information. Below we will show how to use a command-line interface for similar tasks.\n```bash title=\"CLI\"\n-$ cat tmp/country.schema-simple.yaml\n+cat tmp/country.schema-simple.yaml\n```\n```yaml\nfields:\n@@ -142,7 +142,7 @@ Let's break it down:\n```bash title=\"CLI\"\n-$ cat tmp/country.schema.yaml\n+cat tmp/country.schema.yaml\n```\n```yaml\nfields:\n@@ -186,7 +186,7 @@ For this section, we will use a file that is slightly more complex to handle. 
Fo\n```bash title=\"CLI\"\n-$ cat data/country-2.csv\n+cat data/country-2.csv\n```\n```csv title=\"data/country-2.csv\"\n# Author: the scientist\n@@ -201,7 +201,7 @@ id;neighbor_id;name;population\nLet's describe it this time using the command-line interface:\n```bash title=\"CLI\"\n-$ frictionless describe data/country-2.csv\n+frictionless describe data/country-2.csv\n```\n```yaml\n---\n@@ -245,7 +245,7 @@ So what we did here:\n- we reuse the schema we created earlier as the data has the same structure and meaning\n```bash title=\"CLI\"\n-$ cat tmp/country.resource.yaml\n+cat tmp/country.resource.yaml\n```\n```yaml\nencoding: utf-8\n@@ -320,7 +320,7 @@ The data included in the package may be provided as:\nFor this section, we will use the following files:\n```bash title=\"CLI\"\n-$ cat data/country-3.csv\n+cat data/country-3.csv\n```\n```csv title=\"data/country-3.csv\"\nid,capital_id,name,population\n@@ -332,7 +332,7 @@ id,capital_id,name,population\n```\n```bash title=\"CLI\"\n-$ cat data/capital-3.csv\n+cat data/capital-3.csv\n```\n```csv title=\"data/capital-3.csv\"\nid,name\n@@ -346,7 +346,7 @@ id,name\nFirst of all, let's describe our package using the command-line interface. We did it before for a resource but now we're going to use a glob pattern to indicate that there are multiple files:\n```bash title=\"CLI\"\n-$ frictionless describe data/*-3.csv\n+frictionless describe data/*-3.csv\n```\n```yaml\n---\n@@ -408,7 +408,7 @@ package.to_yaml(\"tmp/country.package.yaml\")\nIn this case, we add a relation between different files connecting `id` and `capital_id`. Also, we provide dataset-level metadata to explain the purpose of this dataset. We haven't added individual fields' titles and descriptions, but that can be done as it was shown in the \"Table Schema\" section.\n```bash title=\"CLI\"\n-$ cat tmp/country.package.yaml\n+cat tmp/country.package.yaml\n```\n```yaml\ntitle: Countries and their capitals\n@@ -468,7 +468,7 @@ Let's get back to this unusual data table:\n```bash title=\"CLI\"\n-$ cat data/country-2.csv\n+cat data/country-2.csv\n```\n```csv title=\"data/country-2.csv\"\n# Author: the scientist\n@@ -483,7 +483,7 @@ id;neighbor_id;name;population\nAs we tried before, by default Frictionless can't properly describe this file so we got something like:\n```bash title=\"CLI\"\n-$ frictionless describe data/country-2.csv\n+frictionless describe data/country-2.csv\n```\n```yaml\n---\n@@ -506,7 +506,7 @@ schema:\nTrying to extract the data will fail this way:\n```bash title=\"CLI\"\n-$ frictionless extract data/country-2.csv\n+frictionless extract data/country-2.csv\n```\n```\n---\n@@ -528,7 +528,7 @@ id;neighbor_id;name;population\nThis example highlights a really important idea - without metadata many software will not be able to even read this data file. Furthermore, without metadata people cannot understand the purpose of this data. To see how we can use metadata to fix our data, let's now use the `country.resource.yaml` file we created in the \"Data Resource\" section with Frictionless `extract`:\n```bash title=\"CLI\"\n-$ frictionless extract tmp/country.resource.yaml --basepath .\n+frictionless extract tmp/country.resource.yaml --basepath .\n```\n```\n---\n@@ -565,7 +565,7 @@ pprint(resource)\nFrictionless always tries to be as explicit as possible. We didn't provide any metadata except for `path` so we got the expected result. But now, we'd like to `infer` additional metadata:\n-> Note that we use the `stats` argument for the `resource.infer` function. 
We can ask for stats using CLI with `$ frictionless describe data/table.csv --stats`\n+> Note that we use the `stats` argument for the `resource.infer` function. We can ask for stats using CLI with `frictionless describe data/table.csv --stats`\n```python title=\"Python\"\n@@ -667,7 +667,7 @@ resource.to_yaml(\"tmp/country.resource.yaml\")\nLet's check it out:\n```bash title=\"CLI\"\n-$ cat tmp/country.resource.yaml\n+cat tmp/country.resource.yaml\n```\n```yaml\ncustomKey1: Value1\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/extracting-data.md",
"new_path": "docs/guides/extracting-data.md",
"diff": "@@ -5,7 +5,7 @@ title: Extracting Data\nExtracting data means reading tabular data from some source. We can use various customizations for this process such as providing a file format, table schema, limiting fields or rows amounts, and much more. Let's see this with some real files:\n```bash title=\"CLI\"\n-$ cat data/country-3.csv\n+cat data/country-3.csv\n```\n```csv title=\"data/country-3.csv\"\nid,capital_id,name,population\n@@ -17,7 +17,7 @@ id,capital_id,name,population\n```\n```bash title=\"CLI\"\n-$ cat data/capital-3.csv\n+cat data/capital-3.csv\n```\n```csv title=\"data/capital-3.csv\"\nid,name\n@@ -31,7 +31,7 @@ id,name\nTo start, we will use the command-line interface:\n```bash title=\"CLI\"\n-$ frictionless extract data/country-3.csv\n+frictionless extract data/country-3.csv\n```\n```\n---\n@@ -76,9 +76,9 @@ The high-level interface for extracting data provided by Frictionless is a set o\nIn the command-line, there is only 1 command (`extract`) but there is a flag to adjust the behavior:\n```bash title=\"CLI\"\n-$ frictionless extract\n-$ frictionless extract --type package\n-$ frictionless extract --type resource\n+frictionless extract\n+frictionless extract --type package\n+frictionless extract --type resource\n```\nThe `extract` functions always read data in a form of rows (see the object description below) into memory. The lower-level interfaces will allow you to stream data and various output forms.\n@@ -113,7 +113,7 @@ resource.to_yaml('tmp/capital.resource.yaml')\n```\n```bash title=\"CLI\"\n-$ frictionless extract tmp/capital.resource.yaml --basepath .\n+frictionless extract tmp/capital.resource.yaml --basepath .\n```\n```\n---\n@@ -138,7 +138,7 @@ So what's happened? We set the textual representation of the number \"3\" to be a\nLet's start by using the command line-interface. We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset:\n```bash title=\"CLI\"\n-$ frictionless extract data/*-3.csv\n+frictionless extract data/*-3.csv\n```\n```\n---\n@@ -261,7 +261,7 @@ The Package class is a metadata class which provides an ability to read its cont\n```bash title=\"CLI\"\n-$ frictionless describe data/*-3.csv --json > tmp/country.package.json\n+frictionless describe data/*-3.csv --json > tmp/country.package.json\n```\nNow, we can open the created descriptor and read the package's resources:\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/transforming-data.md",
"new_path": "docs/guides/transforming-data.md",
"diff": "@@ -14,7 +14,7 @@ The main difference between the first two and pipelines that resource and packag\n```bash title=\"CLI\"\n-$ cat data/transform.csv\n+cat data/transform.csv\n```\n```csv title=\"data/transform.csv\"\nid,name,population\n@@ -24,7 +24,7 @@ id,name,population\n```\n```bash title=\"CLI\"\n-$ cat data/transform-groups.csv\n+cat data/transform-groups.csv\n```\n```csv title=\"data/transform-groups.csv\"\nid,name,population,year\n@@ -37,7 +37,7 @@ id,name,population,year\n```\n```bash title=\"CLI\"\n-$ cat data/transform-pivot.csv\n+cat data/transform-pivot.csv\n```\n```csv title=\"data/transform-pivot.csv\"\nregion,gender,style,units\n@@ -737,7 +737,7 @@ target = transform(\n```\n```bash title=\"CLI\"\n-$ cat tmp/transform.json\n+cat tmp/transform.json\n```\n```json title=\"tmp/transform.json\"\n[\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/validating-data.md",
"new_path": "docs/guides/validating-data.md",
"diff": "@@ -6,7 +6,7 @@ Tabular data validation is a process of identifying tabular problems that have p\n```bash title=\"CLI\"\n-$ cat data/capital-invalid.csv\n+cat data/capital-invalid.csv\n```\n```csv title=\"data/capital-valid.csv\"\nid,name,name\n@@ -27,7 +27,7 @@ Using the command-line interface we can validate this file. Frictionless provide\n```bash title=\"CLI\"\n-$ frictionless validate data/capital-invalid.csv\n+frictionless validate data/capital-invalid.csv\n```\n```\n---\n@@ -58,12 +58,12 @@ The high-level interface for validating data provided by Frictionless is a set o\nIn command-line, there is only 1 command but there is a flag to adjust the behavior:\n```bash title=\"CLI\"\n-$ frictionless validate\n-$ frictionless validate --source-type schema\n-$ frictionless validate --source-type resource\n-$ frictionless validate --source-type package\n-$ frictionless validate --source-type inquiry\n-$ frictionless validate --source-type table\n+frictionless validate\n+frictionless validate --source-type schema\n+frictionless validate --source-type resource\n+frictionless validate --source-type package\n+frictionless validate --source-type inquiry\n+frictionless validate --source-type table\n```\n## Validating Schema\n@@ -82,7 +82,7 @@ And validate it using the command-line interface:\n```bash title=\"CLI\"\n-$ frictionless validate tmp/invalid.schema.yaml\n+frictionless validate tmp/invalid.schema.yaml\n```\n```\n---\n@@ -102,13 +102,13 @@ Schema validation can be very useful when you work with different classes of tab\nAs it was shown in the \"Describing Data\" guide a resource is a container having both metadata and data. We need to create a resource descriptor to validate it:\n```bash title=\"CLI\"\n-$ frictionless describe data/capital-invalid.csv --json > tmp/capital.resource.json\n+frictionless describe data/capital-invalid.csv --json > tmp/capital.resource.json\n```\nLet's now use the command-line interface to ensure that we are getting the same result as we had without using a resource:\n```bash title=\"CLI\"\n-$ frictionless validate tmp/capital.resource.json --basepath .\n+frictionless validate tmp/capital.resource.json --basepath .\n```\n```\n---\n@@ -140,7 +140,7 @@ resource.to_yaml('tmp/capital.resource.yaml')\nWe have added a few bad metrics to our resource descriptor. The validation below reports it in addition to all the errors we had before. This example is showing how concepts like Data Resource can be extremely useful when working with data.\n```bash title=\"CLI\"\n-$ frictionless validate tmp/capital.resource.yaml --basepath .\n+frictionless validate tmp/capital.resource.yaml --basepath .\n```\n```\n---\n@@ -163,7 +163,7 @@ None 3 duplicate-header Header \"name\" in field at position \"3\" is duplica\nA package is a set of resources + additional metadata. 
To showcase a package validation we need one more tabular file:\n```bash title=\"CLI\"\n-$ cat data/capital-valid.csv\n+cat data/capital-valid.csv\n```\n```csv title=\"data/capital-valid.csv\"\nid,name\n@@ -177,8 +177,8 @@ id,name\nLet's describe and validate a package:\n```bash title=\"CLI\"\n-$ frictionless describe data/capital-*id.csv --json > tmp/capital.package.json\n-$ frictionless validate tmp/capital.package.json --basepath .\n+frictionless describe data/capital-*id.csv --json > tmp/capital.package.json\n+frictionless validate tmp/capital.package.json --basepath .\n```\n```\n---\n@@ -220,7 +220,7 @@ inquiry.to_yaml('tmp/capital.inquiry.yaml')\nTasks in the Inquiry accept the same arguments written in camelCase as the corresponding `validate` functions have. As usual, let' run validation:\n```bash title=\"CLI\"\n-$ frictionless validate tmp/capital.inquiry.yaml\n+frictionless validate tmp/capital.inquiry.yaml\n```\n```\n---\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/introduction/introduction.md",
"new_path": "docs/introduction/introduction.md",
"diff": "@@ -33,7 +33,7 @@ Frictionless is a framework to describe, extract, validate, and transform tabula\n## Example\n-```bash\n+```\n$ frictionless validate data/invalid.csv\n[invalid] data/invalid.csv\n"
},
{
"change_type": "MODIFY",
"old_path": "portal/src/css/custom.css",
"new_path": "portal/src/css/custom.css",
"diff": "+/* Header */\n+\n.header-mainsite-link {\nopacity: 0.5;\npadding-right: 0;\n@@ -20,3 +22,9 @@ html[data-theme='dark'] .header-github-link:before {\nbackground: url(\"data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E\")\nno-repeat;\n}\n+\n+/* Codeblock */\n+\n+.language-bash .token-line:before {\n+ content: '$ ';\n+}\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Removed command-line prompt from Copy (#691) |
234,917 | 16.02.2021 01:07:59 | 21,600 | 573504406a2bdf0e098365cb8f312bcdd539eea2 | Improved introduction docs
* edits intro guide, getting started, intro, guide overview
* adds install troubleshooting info
* fix space
apparently I added a space, so I'm removing it now... | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -50,4 +50,4 @@ $ frictionless validate data/invalid.csv\n## Documentation\nPlease visit our documentation portal:\n-- https://framework.frictionlessdata.io/docs/guides/getting-started\n+- https://framework.frictionlessdata.io/docs/introduction/introduction\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/guides-overview.md",
"new_path": "docs/guides/guides-overview.md",
"diff": "title: Guides Overview\n---\n+## How to use this documentation\n+\nFrictionless Framework documentation has various guides which can be grouped by an end user level in relation to the framework:\n-## Beginner Level\n+### Beginner Level\nInstallation and first steps with Frictionless Framework:\n-- [Getting Started](getting-started.md)\n-- [Introduction Guide](introduction-guide.md)\n+- [Quick Start](quick-start)\n+- [Overview Example](overview-example)\n-## Intermediate Level\n+### Intermediate Level\nMain Frictionless Framework functionality: describe, extract, validate, and transform:\n@@ -20,7 +22,7 @@ Main Frictionless Framework functionality: describe, extract, validate, and tran\n- [Validating Data](validating-data.md)\n- [Transforming Data](transforming-data.md)\n-## Advanced Level\n+### Advanced Level\nExplor the following section for more advanced guides:\n- [Mastering the Framework](package-guide.md)\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/introduction/whats-next.md",
"new_path": "docs/introduction/whats-next.md",
"diff": "@@ -6,7 +6,7 @@ Based on your familiarity level and your tasks you can continue with one of the\n## Guides\n-If you want to know more about core Frictionless Framework concepts:\n+If you want to know more about core Frictionless Framework functions:\n- [Guides Overview](../guides/guides-overview.md)\n## Tutorials\n"
},
{
"change_type": "MODIFY",
"old_path": "portal/sidebars.js",
"new_path": "portal/sidebars.js",
"diff": "@@ -9,14 +9,14 @@ module.exports = {\nintroduction: [\"introduction/introduction\", \"introduction/whats-next\"],\nguides: [\n\"guides/guides-overview\",\n- \"guides/getting-started\",\n- \"guides/introduction-guide\",\n+ \"guides/quick-start\",\n+ \"guides/overview-example\",\n\"guides/describing-data\",\n\"guides/extracting-data\",\n\"guides/validating-data\",\n\"guides/transforming-data\",\n{\n- \"Mastring the Framework\": [\n+ \"Mastering the Framework\": [\n\"guides/package-guide\",\n\"guides/resource-guide\",\n\"guides/schema-guide\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improved introduction docs (#685)
* edits intro guide, getting started, intro, guide overview
* adds install troubleshooting info
* fix space
apparently I added a space, so I'm removing it now...
Co-authored-by: roll <[email protected]> |
234,912 | 17.02.2021 16:40:09 | -10,800 | 215433fe6dc20b336883332ba89cc9e7d6eaee0d | Added Package Guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/package-guide.md",
"new_path": "docs/guides/framework/package-guide.md",
"diff": "title: Package Guide\n---\n-> This guide in under development. We are moving some shared Package information from describe, extract, validate, and transform guides to this guide.\n+The Data Package is a core Frictionless Data concept meaning a set of resources with additional metadata provided. You can read [Package Spec](https://specs.frictionlessdata.io/data-package/) for more information.\n+\n+## Creating Package\n+\n+Let's create a data package:\n+\n+```python title=\"Python\"\n+from frictionless import Package\n+\n+package = Package('data/table.csv') # from a resource path\n+package = Package('data/tables/*') # from a resources glob\n+package = Package(['data/tables/chunk1.csv', 'data/tables/chunk2.csv]') # from a list\n+package = Package('data/package/datapackage.json') # from a descriptor path\n+package = Package({'resources': {'path': 'data/table.csv'}}) # from a descriptor\n+```\n+\n+As you can see it's possible to create a package providing different kinds of sources which will be detector to have some type automatically (e.g. whether it's a glob or a path). It's possible to make this step more explicit:\n+\n+```python title=\"Python\"\n+from frictionless import Package, Resource\n+\n+package = Package(resources=[Resource(path='data/table.csv')]) # from resources\n+package = Package(descriptor='data/package/datapackage.json') # from a descriptor\n+```\n+\n+## Describing Package\n+\n+The specs support a great deal of package metadata which is possible to have with Frictionless Framework too:\n+\n+```python title=\"Python\"\n+from frictionless import Package, Resource\n+\n+package = Package(\n+ name='package',\n+ title='My Package',\n+ descriptor='My Package for the Guide',\n+ resources=[Resource(path='data/table.csv')],\n+ # it's possible to provide all the official properties like homepage, version, etc\n+)\n+```\n+\n+If you have created a package from a descriptor you can access this properties:\n+\n+```python title=\"Python\"\n+from frictionless import Package\n+\n+package = Package('some/datapackage.json')\n+package.name\n+package.title\n+package.description\n+# and others\n+```\n+\n+And edit them:\n+\n+```python title=\"Python\"\n+from frictionless import Package\n+\n+package = Package('some/datapackage.json')\n+package.name = 'new name'\n+package.title = 'New Title'\n+package.description = 'New Description'\n+# and others\n+```\n+\n+## Resoure Management\n+\n+The core purpose of having a package is to provide an ability to have a set of resources. 
The Package class provides useful methods to manage resources:\n+\n+\n+```python title=\"Python\"\n+from frictionless import Package, Resource\n+\n+package = Package('data/package/datapackage.json')\n+print(package.resources)\n+print(package.resource_names)\n+package.add_resource(Resource(name='new', data=[['key1', 'key2'], ['val1', 'val2']]))\n+resource = package.get_resource('new')\n+print(package.has_resource('new'))\n+package.remove_resource('new')\n+```\n+\n+## Saving Descriptor\n+\n+Like any of the Metadata classes, the Package class can be saved as JSON or YAML:\n+\n+```python title=\"Python\"\n+from frictionless import Package\n+package = Package('data/tables/*')\n+package.to_json('package.json') # Save as JSON\n+package.to_yaml('package.yaml') # Save as YAML\n+```\n+\n+## Package Options\n+\n+The Package constructor accepts a few additional options to tweak how it and the underlying resources will work:\n+\n+### Basepath\n+\n+Will make all the paths treated as relative to this path.\n+\n+### Detector\n+\n+[Detector](detector.md) object to tweak metadata detection.\n+\n+### Onerror\n+\n+There are 3 possible values for reacting to tabular errors:\n+- ignore (default)\n+- warn\n+- raise\n+\n+### Trusted\n+\n+By default an error will be raised on [unsafe paths](https://specs.frictionlessdata.io/data-resource/#url-or-path). Setting `trusted` to `True` will disable this behaviour.\n+\n+### Hashing\n+\n+Will be passed to the underlying resources as a default hashing algorithm.\n+\n+[Detector](detector.md) object to tweak metadata detection.\n+\n+## Import/Export\n+\n+It's possible to import and export packages from/to:\n+- bigquery\n+- ckan\n+- sql\n+- zip\n+\n+### BigQuery\n+\n+> This functionality is in the draft state.\n+\n+### Ckan\n+\n+> This functionality is in the draft state.\n+\n+### Sql\n+\n+> This functionality is in the draft state.\n+\n+### Zip\n+\n+> This functionality is in the draft state.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Package Guide (#698) |
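The resource-management calls documented in this commit compose naturally; here is a short sketch reusing only the API shown in the diff (the `data/table.csv` path and the `tmp/` directory are the guide's sample locations and are assumed to exist):

```python
from frictionless import Package, Resource

# Build a package, then exercise the management methods from the guide.
package = Package(resources=[Resource(path='data/table.csv')])
package.add_resource(Resource(name='new', data=[['key1', 'key2'], ['val1', 'val2']]))
print(package.resource_names)       # the inferred 'table' plus 'new'
print(package.has_resource('new'))  # True
package.remove_resource('new')
package.to_yaml('tmp/demo.package.yaml')  # or package.to_json(...)
```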
234,912 | 17.02.2021 17:32:08 | -10,800 | a0d0e49b68f333cdc55077b680a226e92b45a2c6 | Added Layout Guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/detector-guide.md",
"new_path": "docs/guides/framework/detector-guide.md",
"diff": "@@ -32,7 +32,7 @@ The detector class instance are accepted by many classes and functions:\n- validate\n- and more\n-In cases, you just need to create a Detector instance using desired options and pass to the classed and function from above.\n+You just need to create a Detector instance using desired options and pass to the classed and function from above.\n## Detector Options\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/layout-guide.md",
"new_path": "docs/guides/framework/layout-guide.md",
"diff": "title: Layout Guide\n---\n-> This guide in under development. We are moving some shared Layout information from describe, extract, validate, and transform guides to this guide.\n+The Layout concept give us an ability to manage table header and pick/skip arbitrary fields and rows from the raw data stream.\n+\n+```bash title=\"CLI\"\n+$ cat data/matrix.csv\n+```\n+```csv\n+f1,f2,f3,f4\n+11,12,13,14\n+21,22,23,24\n+31,32,33,34\n+41,42,43,44\n+```\n+\n+## Layout Usage\n+\n+The Layout class instance are accepted by many classes and functions:\n+\n+- Resource\n+- describe\n+- extract\n+- validate\n+- and more\n+\n+You just need to create a Layout instance using desired options and pass to the classed and function from above.\n+\n+## Layout Options\n+\n+Let's list all the available Layout options with simple usage examples:\n+\n+### Header\n+\n+It's a boolean flag which defaults to `True` indicating whether the data has a header row or not. In the following example the header row will be treated as a data row:\n+\n+```python\n+from frictionless import Table, Layout\n+\n+layout = Layout(header=False)\n+with Resource('data/capital-3.csv', layout=layout) as resource:\n+ pprint(resource.header)\n+ pprint(resource.read_rows())\n+```\n+```\n+[]\n+[Row([('field1', 'id'), ('field2', 'name')]),\n+ Row([('field1', '1'), ('field2', 'London')]),\n+ Row([('field1', '2'), ('field2', 'Berlin')]),\n+ Row([('field1', '3'), ('field2', 'Paris')]),\n+ Row([('field1', '4'), ('field2', 'Madrid')]),\n+ Row([('field1', '5'), ('field2', 'Rome')])]\n+```\n+\n+### Header Rows\n+\n+If header is `True` which is default, this parameters indicates where to find the header row or header rows for a multiline header. Let's see on example how the first two data rows can be treated as a part of a header:\n+\n+```python\n+from frictionless import Table, Layout\n+\n+layout = Layout(header_rows=[1, 2, 3])\n+with Resource('data/capital-3.csv', layout=layout) as resource:\n+ pprint(resource.header)\n+ pprint(resource.read_rows())\n+```\n+```\n+['id 1 2', 'name London Berlin']\n+[Row([('id 1 2', 3), ('name London Berlin', 'Paris')]),\n+ Row([('id 1 2', 4), ('name London Berlin', 'Madrid')]),\n+ Row([('id 1 2', 5), ('name London Berlin', 'Rome')])]\n+```\n+\n+### Header Join\n+\n+If there are multiple header rows which is managed by `header_rows` parameter, we can set a string to be a separator for a header's cell join operation. Usually it's very handy for some \"fancy\" Excel files. For the sake of simplicity, we will show on a CSV file:\n+\n+```python\n+from frictionless import Table, Layout\n+\n+layout = Layout(header_rows=[1, 2, 3], header_join='/')\n+with Resource('data/capital-3.csv', layout=layout) as resource:\n+ pprint(resource.header)\n+ pprint(resource.read_rows())\n+```\n+```\n+['id/1/2', 'name/London/Berlin']\n+[Row([('id/1/2', 3), ('name/London/Berlin', 'Paris')]),\n+ Row([('id/1/2', 4), ('name/London/Berlin', 'Madrid')]),\n+ Row([('id/1/2', 5), ('name/London/Berlin', 'Rome')])]\n+```\n+\n+### Header Case\n+\n+By default a header is validated in a case sensitive mode. To disable this behaviour we can set the `header_case` parameter to `False`. This option is accepted by any Layout and a dialect can be passed to `extract`, `validate` and other functions. 
Please note that it doesn't affect the resulting header; it only affects how it's validated:\n+\n+```python\n+from frictionless import Resource, Schema, Field, Layout\n+\n+layout = Layout(header_case=False)\n+schema = Schema(fields=[Field(name=\"ID\"), Field(name=\"NAME\")])\n+with Resource('data/capital-3.csv', layout=layout, schema=schema) as resource:\n+ print(f'Header: {resource.header}')\n+ print(f'Valid: {resource.header.valid}') # without \"header_case\" it will have 2 errors\n+```\n+```\n+Header: ['id', 'name']\n+Valid: True\n+```\n+\n+### Pick/Skip Fields\n+\n+We can pick and skip arbitrary fields based on the header row. These options accept a list of field numbers, a list of strings or a regex to match. All the queries below do the same thing for this file:\n+\n+```python\n+from frictionless import extract, Layout\n+\n+print(extract('data/matrix.csv', layout=Layout(pick_fields=[2, 3])))\n+print(extract('data/matrix.csv', layout=Layout(skip_fields=[1, 4])))\n+print(extract('data/matrix.csv', layout=Layout(pick_fields=['f2', 'f3'])))\n+print(extract('data/matrix.csv', layout=Layout(skip_fields=['f1', 'f4'])))\n+print(extract('data/matrix.csv', layout=Layout(pick_fields=['<regex>f[23]'])))\n+print(extract('data/matrix.csv', layout=Layout(skip_fields=['<regex>f[14]'])))\n+```\n+```\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+[Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n+```\n+\n+### Limit/Offset Fields\n+\n+There are two options that provide the ability to limit the amount of fields, similar to SQL's directives:\n+\n+```python\n+from frictionless import extract, Layout\n+\n+print(extract('data/matrix.csv', layout=Layout(limit_fields=2)))\n+print(extract('data/matrix.csv', layout=Layout(offset_fields=2)))\n+```\n+```\n+[Row([('f1', 11), ('f2', 12)]), Row([('f1', 21), ('f2', 22)]), Row([('f1', 31), ('f2', 32)]), Row([('f1', 41), ('f2', 42)])]\n+[Row([('f3', 13), ('f4', 14)]), Row([('f3', 23), ('f4', 24)]), Row([('f3', 33), ('f4', 34)]), Row([('f3', 43), ('f4', 44)])]\n+```\n+\n+### Pick/Skip Rows\n+\n+It's like the field counterparts, but values will be compared to the first cell of a row. All the queries below do the same thing for this file, but take into account that when picking we need to also pick the header row. 
In addition, there is a special value `<blank>` that matches a row if it's completely blank:\n+\n+```python\n+from frictionless import extract, Layout\n+\n+print(extract('data/matrix.csv', layout=Layout(pick_rows=[1, 3, 4])))\n+print(extract('data/matrix.csv', layout=Layout(skip_rows=[2, 5])))\n+print(extract('data/matrix.csv', layout=Layout(pick_rows=['f1', '21', '31'])))\n+print(extract('data/matrix.csv', layout=Layout(skip_rows=['11', '41'])))\n+print(extract('data/matrix.csv', layout=Layout(pick_rows=['<regex>(f1|[23]1)'])))\n+print(extract('data/matrix.csv', layout=Layout(skip_rows=['<regex>[14]1'])))\n+print(extract('data/matrix.csv', layout=Layout(pick_rows=['<blank>'])))\n+```\n+```\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n+[]\n+```\n+\n+### Limit/Offset Rows\n+\n+This is a quite popular option used to limit the number of rows to read:\n+\n+```python\n+from frictionless import extract, Layout\n+\n+print(extract('data/matrix.csv', layout=Layout(limit_rows=2)))\n+print(extract('data/matrix.csv', layout=Layout(offset_rows=2)))\n+```\n+```\n+[Row([('f1', 11), ('f2', 12), ('f3', 13), ('f4', 14)]), Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)])]\n+[Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)]), Row([('f1', 41), ('f2', 42), ('f3', 43), ('f4', 44)])]\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/package-guide.md",
"new_path": "docs/guides/framework/package-guide.md",
"diff": "@@ -105,7 +105,7 @@ Will make all the paths treated as relative to this path.\n### Detector\n-[Detector](detector.md) object to tweak metadata detection.\n+[Detector](detector-guide.md) object to tweak metadata detection.\n### Onerror\n@@ -122,7 +122,7 @@ By default an error will be reaised on [unsafe paths](https://specs.frictionless\nWill be passed to underlaying resources as a default hasing algorithm.\n-[Detector](detector.md) object to tweak metadata detection.\n+[Detector](detector-guide.md) object to tweak metadata detection.\n## Import/Export\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/resource-guide.md",
"new_path": "docs/guides/framework/resource-guide.md",
"diff": "@@ -71,7 +71,7 @@ Header: \"['id', 'name']\"\nMany of the properties above not only can be read from the existent Table but also can be provided as an option to alter the Table behaviour, for example:\n-## Table's Lifecycle\n+## Resource Lifecycle\nYou might have noticed that we had to duplicate the `with Table(...)` statement in some examples. The reason is that Table is a streaming interface. Once it's read you need to open it again. Let's show it in an example:\n@@ -238,27 +238,6 @@ with Table(source, control=control) as table:\nExact parameters depend on schemes and can be found in the \"Schemes Reference\". For example, the Remote Control provides `http_timeout`, `http_session`, and others but there is only one option available for all controls:\n-### Detect Encoding\n-\n-It's a function that can be provided to adjust the encoding detection. This function accepts a data sample and returns a detected encoding:\n-\n-\n-```python\n-from frictionless import Table, Control\n-\n-control = Control(detect_encoding=lambda sample: \"utf-8\")\n-with Table(\"data/capital-3.csv\", control=control) as table:\n- print(table.source)\n- print(table.encoding)\n-```\n-\n- data/capital-3.csv\n- utf-8\n-\n-\n-Further reading:\n-- Schemes Reference\n-\n## Table Dialect\nThe Dialect adjusts the way tabular parsers work. The concept is similar to the Control above. Let's use the CSV Dialect to adjust the delimiter configuration:\n@@ -281,268 +260,7 @@ with Table(source, scheme='text', format='csv', dialect=dialect) as table:\nThere are a great deal of options available for different dialects that can be found in \"Formats Reference\". We will list the properties that can be used with every dialect:\n-### Header\n-\n-It's a boolean flag which defaults to `True` indicating whether the data has a header row or not. In the following example the header row will be treated as a data row:\n-\n-\n-```python\n-from frictionless import Table, Dialect\n-\n-dialect = Dialect(header=False)\n-with Table('data/capital-3.csv', dialect=dialect) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- []\n- [Row([('field1', 'id'), ('field2', 'name')]),\n- Row([('field1', '1'), ('field2', 'London')]),\n- Row([('field1', '2'), ('field2', 'Berlin')]),\n- Row([('field1', '3'), ('field2', 'Paris')]),\n- Row([('field1', '4'), ('field2', 'Madrid')]),\n- Row([('field1', '5'), ('field2', 'Rome')])]\n-\n-\n-### Header Rows\n-\n-If header is `True` which is default, this parameters indicates where to find the header row or header rows for a multiline header. Let's see on example how the first two data rows can be treated as a part of a header:\n-\n-\n-```python\n-from frictionless import Table, Dialect\n-\n-dialect = Dialect(header_rows=[1, 2, 3])\n-with Table('data/capital-3.csv', dialect=dialect) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- ['id 1 2', 'name London Berlin']\n- [Row([('id 1 2', 3), ('name London Berlin', 'Paris')]),\n- Row([('id 1 2', 4), ('name London Berlin', 'Madrid')]),\n- Row([('id 1 2', 5), ('name London Berlin', 'Rome')])]\n-\n-\n-### Header Join\n-\n-If there are multiple header rows which is managed by `header_rows` parameter, we can set a string to be a separator for a header's cell join operation. Usually it's very handy for some \"fancy\" Excel files. 
For the sake of simplicity, we will show on a CSV file:\n-\n-\n-```python\n-from frictionless import Table, Dialect\n-\n-dialect = Dialect(header_rows=[1, 2, 3], header_join='/')\n-with Table('data/capital-3.csv', dialect=dialect) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- ['id/1/2', 'name/London/Berlin']\n- [Row([('id/1/2', 3), ('name/London/Berlin', 'Paris')]),\n- Row([('id/1/2', 4), ('name/London/Berlin', 'Madrid')]),\n- Row([('id/1/2', 5), ('name/London/Berlin', 'Rome')])]\n-\n-\n-### Header Case\n-\n-> *New in version 3.23*\n-\n-By default a header is validated in a case sensitive mode. To disable this behaviour we can set the `header_case` parameter to `False`. This option is accepted by any Dialect and a dialect can be passed to `extract`, `validate` and other functions. Please note that it doesn't affect a resulting header it only affects how it's validated:\n-\n-\n-```python\n-from frictionless import Table, Schema, Field, Dialect\n-\n-dialect = Dialect(header_case=False)\n-schema = Schema(fields=[Field(name=\"ID\"), Field(name=\"NAME\")])\n-with Table('data/capital-3.csv', dialect=dialect, schema=schema) as table:\n- print(f'Header: {table.header}')\n- print(f'Valid: {table.header.valid}') # without \"header_case\" it will have 2 errors\n-```\n-\n- Header: ['id', 'name']\n- Valid: True\n-\n-\n-Further reading:\n-- Formats Reference\n-\n-## Table Query\n-\n-Using header management described in the \"Table Dialect\" section we can have a basic skipping rows ability e.g. if we set `dialect.header_rows=[2]` we will skip the first row but it's very limited. There is a much more powerful interface called Table Queries to indicate where exactly to get tabular data from a file. We will use a simple file looking like a matrix:\n-\n-\n-```python\n-! cat data/matrix.csv\n-```\n-\n- f1,f2,f3,f4\n- 11,12,13,14\n- 21,22,23,24\n- 31,32,33,34\n- 41,42,43,44\n-\n-\n-### Pick/Skip Fields\n-\n-We can pick and skip arbitrary fields based on a header row. These options accept a list of field numbers, a list of strings or a regex to match. 
All the queries below do the same thing for this file:\n-\n-\n-```python\n-from frictionless import extract, Query\n-\n-print(extract('data/matrix.csv', query=Query(pick_fields=[2, 3])))\n-print(extract('data/matrix.csv', query=Query(skip_fields=[1, 4])))\n-print(extract('data/matrix.csv', query=Query(pick_fields=['f2', 'f3'])))\n-print(extract('data/matrix.csv', query=Query(skip_fields=['f1', 'f4'])))\n-print(extract('data/matrix.csv', query=Query(pick_fields=['<regex>f[23]'])))\n-print(extract('data/matrix.csv', query=Query(skip_fields=['<regex>f[14]'])))\n-```\n-\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n- [Row([('f2', 12), ('f3', 13)]), Row([('f2', 22), ('f3', 23)]), Row([('f2', 32), ('f3', 33)]), Row([('f2', 42), ('f3', 43)])]\n-\n-\n-### Limit/Offset Fields\n-\n-There are two options that provide an ability to limit amount of fields similar to SQL's directives:\n-\n-\n-```python\n-from frictionless import extract, Query\n-\n-print(extract('data/matrix.csv', query=Query(limit_fields=2)))\n-print(extract('data/matrix.csv', query=Query(offset_fields=2)))\n-```\n-\n- [Row([('f1', 11), ('f2', 12)]), Row([('f1', 21), ('f2', 22)]), Row([('f1', 31), ('f2', 32)]), Row([('f1', 41), ('f2', 42)])]\n- [Row([('f3', 13), ('f4', 14)]), Row([('f3', 23), ('f4', 24)]), Row([('f3', 33), ('f4', 34)]), Row([('f3', 43), ('f4', 44)])]\n-\n-\n-### Pick/Skip Rows\n-\n-It's alike the field counterparts but it will be compared to the first cell of a row. All the queries below do the same thing for this file but take into account that when picking we need to also pick a header row. 
In addition, there is special value `<blank>` that matches a row if it's completely blank:\n-\n-\n-```python\n-from frictionless import extract, Query\n-\n-print(extract('data/matrix.csv', query=Query(pick_rows=[1, 3, 4])))\n-print(extract('data/matrix.csv', query=Query(skip_rows=[2, 5])))\n-print(extract('data/matrix.csv', query=Query(pick_rows=['f1', '21', '31'])))\n-print(extract('data/matrix.csv', query=Query(skip_rows=['11', '41'])))\n-print(extract('data/matrix.csv', query=Query(pick_rows=['<regex>(f1|[23]1)'])))\n-print(extract('data/matrix.csv', query=Query(skip_rows=['<regex>[14]1'])))\n-print(extract('data/matrix.csv', query=Query(pick_rows=['<blank>'])))\n-```\n-\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- [Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)]), Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)])]\n- []\n-\n-\n-### Limit/Offset Rows\n-\n-This is a quite popular option used to limit amount of rows to read:\n-\n-\n-```python\n-from frictionless import extract, Query\n-\n-print(extract('data/matrix.csv', query=Query(limit_rows=2)))\n-print(extract('data/matrix.csv', query=Query(offset_rows=2)))\n-```\n-\n- [Row([('f1', 11), ('f2', 12), ('f3', 13), ('f4', 14)]), Row([('f1', 21), ('f2', 22), ('f3', 23), ('f4', 24)])]\n- [Row([('f1', 31), ('f2', 32), ('f3', 33), ('f4', 34)]), Row([('f1', 41), ('f2', 42), ('f3', 43), ('f4', 44)])]\n-\n-\n-## Header Options\n-\n-Header management is a responsibility of \"Table Dialect\" which will be described below but Table accept a special `headers` argument that plays a role of a high-level helper in setting different header options.\n-\n-It accepts a `False` values indicating that there is no header row:\n-\n-\n-```python\n-from frictionless import Table\n-\n-with Table('data/capital-3.csv', headers=False) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- []\n- [Row([('field1', 'id'), ('field2', 'name')]),\n- Row([('field1', '1'), ('field2', 'London')]),\n- Row([('field1', '2'), ('field2', 'Berlin')]),\n- Row([('field1', '3'), ('field2', 'Paris')]),\n- Row([('field1', '4'), ('field2', 'Madrid')]),\n- Row([('field1', '5'), ('field2', 'Rome')])]\n-\n-\n-It accepts an integer indicating the header row number:\n-\n-\n-```python\n-from frictionless import Table\n-\n-with Table('data/capital-3.csv', headers=2) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- ['1', 'London']\n- [Row([('1', 2), ('London', 'Berlin')]),\n- Row([('1', 3), ('London', 'Paris')]),\n- Row([('1', 4), ('London', 'Madrid')]),\n- Row([('1', 5), ('London', 'Rome')])]\n-\n-\n-It accepts a list of integers indicating a multiline header row numbers:\n-\n-\n-```python\n-from frictionless import Table\n-\n-with Table('data/capital-3.csv', headers=[1,2,3]) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- ['id 1 2', 'name London Berlin']\n- [Row([('id 1 2', 3), ('name London Berlin', 'Paris')]),\n- Row([('id 1 2', 4), ('name London Berlin', 'Madrid')]),\n- Row([('id 1 2', 5), 
('name London Berlin', 'Rome')])]\n-\n-\n-It accepts a pair containing a list of integers indicating a multiline header row numbers and a string indicating a joiner for a concatenate operation:\n-\n-\n-```python\n-from frictionless import Table\n-\n-with Table('data/capital-3.csv', headers=[[1,2,3], '/']) as table:\n- pprint(table.header)\n- pprint(table.read_rows())\n-```\n-\n- ['id/1/2', 'name/London/Berlin']\n- [Row([('id/1/2', 3), ('name/London/Berlin', 'Paris')]),\n- Row([('id/1/2', 4), ('name/London/Berlin', 'Madrid')]),\n- Row([('id/1/2', 5), ('name/London/Berlin', 'Rome')])]\n-\n-\n-## Integrity Options\n+## Resource Options\nExtraction function and classes accepts a few options that are needed to manage integrity behaviour:\n@@ -590,25 +308,3 @@ except Exception as exception:\n```\n[type-error] The cell \"1\" in row at position \"2\" and field \"name\" at position \"1\" has incompatible type: type is \"string/default\"\n-\n-\n-### Lookup\n-\n-The lookup is a special object providing relational information in cases when it's not possible to extract. For example, the Package is capable of getting a lookup object from its resource while a table object needs it to be provided. Let's see an example:\n-\n-\n-```python\n-from frictionless import Table\n-\n-source = [[\"name\"], [1], [2], [4]]\n-lookup = {\"other\": {(\"name\",): {(1,), (2,), (3,)}}}\n-fk = {\"fields\": [\"name\"], \"reference\": {\"fields\": [\"name\"], \"resource\": \"other\"}}\n-with Table(source, lookup=lookup, patch_schema={\"foreignKeys\": [fk]}) as table:\n- for row in table:\n- if row.row_number == 3:\n- assert row.valid is False\n- assert row.errors[0].code == \"foreign-key-error\"\n- continue\n- assert row.valid\n-\n-```\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Layout Guide (#699) |
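The layout guide added above demonstrates each option in isolation; options also compose. Here is a minimal sketch (assuming frictionless v4 is installed and the guide's `data/matrix.csv` fixture exists) combining field picking with a row limit:

```python
from pprint import pprint
from frictionless import Resource, Layout

# Combine two layout options from the guide: pick two fields, read two rows
layout = Layout(pick_fields=["f2", "f3"], limit_rows=2)
with Resource("data/matrix.csv", layout=layout) as resource:
    pprint(resource.header)       # ['f2', 'f3']
    pprint(resource.read_rows())  # two rows containing only the f2/f3 cells
```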
234,912 | 17.02.2021 17:50:43 | -10,800 | 8b21797beb36c28f1c601b222d15094b12f92e58 | Added Field Guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/field-guide.md",
"new_path": "docs/guides/framework/field-guide.md",
"diff": "title: Field Guide\n---\n-> This guide in under development. We are moving some shared Field information from describe, extract, validate, and transform guides to this guide.\n+Field is a lower level object that helps describe and convert tabular data.\n+\n+## Creating Field\n+\n+Let's create a field:\n+\n+```python title=\"Python\"\n+from frictionless import Field\n+\n+field = Field(name='name', type='integer')\n+```\n+\n+Usually we work with fields which were already created by a schema:\n+\n+```python title=\"Python\"\n+from frictionless import describe\n+\n+resource = describe('data/table.csv')\n+field = schema.get_fied('id')\n+```\n+\n+## Field Types\n+\n+Frictionless Framework supports all the [Table Schema Spec](https://specs.frictionlessdata.io/table-schema/#types-and-formats) field types along with an ability to create custom types.\n+\n+For some types there are additional properties available:\n+\n+```python title=\"Python\"\n+from frictionless import describe\n+\n+resource = describe('data/table.csv')\n+field = schema.get_fied('id') # it's an integer\n+field.bare_number\n+```\n+\n+## Reading Cell\n+\n+During the process of data reading a schema uses a field internally. If needed a user can convert their data using this interface:\n+\n+```python title=\"Python\"\n+from frictionless import Field\n+\n+field = Field(name='name', type='integer')\n+field.read_cell('3') # 3\n+```\n+\n+## Writing Cell\n+\n+During the process of data writing a schema uses a field internally. The same as with reasing a user can convert their data using this interface:\n+\n+```python title=\"Python\"\n+from frictionless import Field\n+\n+field = Field(name='name', type='integer')\n+field.write_cell(3) # '3'\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/layout-guide.md",
"new_path": "docs/guides/framework/layout-guide.md",
"diff": "@@ -5,7 +5,7 @@ title: Layout Guide\nThe Layout concept give us an ability to manage table header and pick/skip arbitrary fields and rows from the raw data stream.\n```bash title=\"CLI\"\n-$ cat data/matrix.csv\n+cat data/matrix.csv\n```\n```csv\nf1,f2,f3,f4\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Field Guide (#700) |
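A note on the guide's `read_cell`/`write_cell` snippets: in the v4 API these methods return a `(cell, notes)` pair, where `notes` carries conversion problems, so a defensive sketch looks like this (a minimal example under that assumption, not the guide's exact snippet):

```python
from frictionless import Field

field = Field(name="id", type="integer")

# read_cell converts a raw string into the field's native type
cell, notes = field.read_cell("3")
print(cell, notes)  # 3 None

# write_cell converts a native value back to its textual form
cell, notes = field.write_cell(3)
print(cell, notes)  # '3' None
```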
234,912 | 18.02.2021 11:43:37 | -10,800 | 819c301c13a6867ba6f98f7babb7b296f97b3b34 | Added Schema Guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/field-guide.md",
"new_path": "docs/guides/framework/field-guide.md",
"diff": "@@ -33,7 +33,7 @@ For some types there are additional properties available:\nfrom frictionless import describe\nresource = describe('data/table.csv')\n-field = schema.get_fied('id') # it's an integer\n+field = resource.schema.get_fied('id') # it's an integer\nfield.bare_number\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/package-guide.md",
"new_path": "docs/guides/framework/package-guide.md",
"diff": "title: Package Guide\n---\n-The Data Package is a core Frictionless Data concept meaning a set of resources with additional metadata provided. You can read [Package Spec](https://specs.frictionlessdata.io/data-package/) for more information.\n+The Data Package is a core Frictionless Data concept meaning a set of resources with additional metadata provided. You can read [Data Package Spec](https://specs.frictionlessdata.io/data-package/) for more information.\n## Creating Package\n@@ -43,7 +43,7 @@ package = Package(\n)\n```\n-If you have created a package from a descriptor you can access this properties:\n+If you have created a package, for example, from a descriptor you can access this properties:\n```python title=\"Python\"\nfrom frictionless import Package\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/schema-guide.md",
"new_path": "docs/guides/framework/schema-guide.md",
"diff": "title: Schema Guide\n---\n-> This guide in under development. We are moving some shared Schema information from describe, extract, validate, and transform guides to this guide.\n+The Table Schema is a core Frictionless Data concept meaning a metadata information regarding tabular data source. You can read [Table Schema Spec](https://specs.frictionlessdata.io/table-schema/) for more information.\n-## Schema Options\n+## Creating Schema\n-By default, a schema for a table is inferred under the hood but we can also pass it explicitly.\n+Let's create a table schema:\n-### Schema\n+```python title=\"Python\"\n+from frictionless import Schema, describe\n-The most common way is providing a schema argument to the Table constructor. For example, let's make the `id` field be a string instead of an integer:\n+schema = describe('data/table.csv', type='schema') # from a resource path\n+schema = Schema('data/schema.json') # from a descriptor path\n+schema = Schema({'fields': {'name': 'id', 'type': 'integer'}}) # from a descriptor\n+```\n+\n+As you can see it's possible to create a schema providing different kinds of sources which will be detector to have some type automatically (e.g. whether it's a dict or a path). It's possible to make this step more explicit:\n+\n+```python title=\"Python\"\n+from frictionless import Schema, Field\n+\n+schema = Schema(fields=[Field(name='id', type='string')]) # from fields\n+schema = Schema(descriptor='data/schema.json') # from a descriptor\n+```\n+\n+## Describing Schema\n+\n+The specs support some additional schema's metadata:\n+\n+```python title=\"Python\"\n+from frictionless import Schema, Resource\n+\n+package = Schema(\n+ fields=[Field(name='id', type='string')],\n+ missing_values=['na'],\n+ primary_key=['id'],\n+ # foreign_keys\n+)\n+```\n+\n+If you have created a schema, for example, from a descriptor you can access this properties:\n+\n+```python title=\"Python\"\n+from frictionless import Schema\n+\n+schema = Schema('data/schema.json')\n+schema.missing_values\n+schema.primary_key\n+# and others\n+```\n+\n+And edit them:\n+\n+```python title=\"Python\"\n+from frictionless import Schema\n+schema = Schema('data/schema.json')\n+schema.missing_values.append('-')\n+# and others\n+```\n+\n+## Field Management\n+\n+The Schema class provides useful methods to manage fields:\n+\n+\n+```python title=\"Python\"\n+from frictionless import Schema, Field\n+\n+schema = Schema('data/schema.json')\n+print(schema.fields)\n+print(schema.field_names)\n+schema.add_field(Field(name='name', type='string'))\n+field = schema.get_field('name')\n+print(schema.has_field('name'))\n+schema.remove_field('name')\n+```\n+\n+## Saving Descriptor\n-```python\n-from frictionless import Table, Schema, Field\n+As any of the Metadata classes the Schema class can be saved as JSON or YAML:\n-schema = Schema(fields=[Field(name='id', type='string'), Field(name='name', type='string')])\n-with Table('data/capital-3.csv', schema=schema) as table:\n- pprint(table.schema)\n- pprint(table.read_rows())\n+```python title=\"Python\"\n+from frictionless import Schema\n+schema = Schema(field=[Field(name='id', type='integer')])\n+schema.to_json('schema.json') # Save as JSON\n+schema.to_yaml('schema.yaml') # Save as YAML\n```\n- {'fields': [{'name': 'id', 'type': 'string'},\n- {'name': 'name', 'type': 'string'}]}\n- [Row([('id', '1'), ('name', 'London')]),\n- Row([('id', '2'), ('name', 'Berlin')]),\n- Row([('id', '3'), ('name', 'Paris')]),\n- Row([('id', '4'), ('name', 'Madrid')]),\n- Row([('id', '5'), 
('name', 'Rome')])]\n+## Reading Cells\n+\n+During the process of data reading a resource uses a schema to convert data:\n+\n+```python title=\"Python\"\n+from frictionless import Schema, Field\n+\n+schema = Schema(fields=[Field(type='integer'), Field(type='string')])\n+schema.read_cells(['3', 'value']) # [3, 'value']\n+```\n+\n+## Writing Cells\n+\n+During the process of data writing a resource uses a schema to convert data:\n+\n+```python title=\"Python\"\n+from frictionless import Schema, Field\n+\n+schema = Schema(fields=[Field(type='integer'), Field(type='string')])\n+schema.write_cells([3, 'value']) # ['3', 'value']\n+schema.write_cells([3, 'value'], types=['string']) # [3, 'value']\n+```\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Schema Guide (#702) |
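Complementing the schema guide above, a small end-to-end sketch; as with Field, in the v4 API `read_cells`/`write_cells` return the converted cells together with conversion notes (that tuple return is an assumption stated here, hence the unpacking):

```python
from frictionless import Schema, Field

schema = Schema(fields=[Field(name="id", type="integer"), Field(name="name", type="string")])

# Convert raw strings into typed values; notes flag any conversion problems
cells, notes = schema.read_cells(["3", "value"])
print(cells)  # [3, 'value']

# And back to the textual form for writing
cells, notes = schema.write_cells([3, "value"])
print(cells)  # ['3', 'value']
```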
234,912 | 18.02.2021 12:19:31 | -10,800 | 1674effe39ee0a473cfbc65e858e1c18a4fd96c0 | Added Resource Guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/package-guide.md",
"new_path": "docs/guides/framework/package-guide.md",
"diff": "@@ -9,13 +9,14 @@ The Data Package is a core Frictionless Data concept meaning a set of resources\nLet's create a data package:\n```python title=\"Python\"\n-from frictionless import Package\n+from frictionless import Package, Resource\npackage = Package('data/table.csv') # from a resource path\npackage = Package('data/tables/*') # from a resources glob\npackage = Package(['data/tables/chunk1.csv', 'data/tables/chunk2.csv]') # from a list\npackage = Package('data/package/datapackage.json') # from a descriptor path\npackage = Package({'resources': {'path': 'data/table.csv'}}) # from a descriptor\n+package = Package(resources=[Resource(path='data/table.csv')]) # from arguments\n```\nAs you can see it's possible to create a package providing different kinds of sources which will be detector to have some type automatically (e.g. whether it's a glob or a path). It's possible to make this step more explicit:\n@@ -61,7 +62,7 @@ And edit them:\nfrom frictionless import Package\npackage = Package('some/datapackage.json')\n-package.name = 'new name'\n+package.name = 'new-name'\npackage.title = 'New Title'\npackage.description = 'New Description'\n# and others\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Resource Guide (#703) |
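The `resources=` argument introduced in this commit makes programmatic package assembly straightforward. A minimal sketch (the path is illustrative, borrowed from the guide):

```python
from frictionless import Package, Resource

# Build a package from explicit Resource objects instead of a descriptor
package = Package(resources=[Resource(path="data/table.csv")])
package.infer()
print(package.resource_names)  # ['table']
```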
234,912 | 18.02.2021 15:24:05 | -10,800 | cfe9ce135cd2329fbf80165960091fc5f76c0c9a | Added Inquiry/Report guide drafts | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/inquiry-guide.md",
"new_path": "docs/guides/framework/inquiry-guide.md",
"diff": "title: Inquiry Guide\n---\n-> This guide in under development. We are moving some shared Inquiry information from describe, extract, validate, and transform guides to this guide.\n+The Inquiry gives you an ability to create arbitrary validation jobs containing a set of individual validation tasks.\n+\n+## Creating Inquiry\n+\n+Let's create an inquiry that includes an individual file validation and a resource validation:\n+\n+```python title=\"Python\"\n+from frictionless import Inquiry\n+\n+inquiry = Inquiry({'tasks': [\n+ {'source': 'data/capital-valid.csv'},\n+ {'source': 'tmp/capital.resource.json', 'basepath': '.'},\n+]})\n+inquiry.to_yaml('tmp/capital.inquiry.yaml')\n+```\n+\n+## Validating Inquiry\n+\n+Tasks in the Inquiry accept the same arguments written in camelCase as the corresponding `validate` functions have. As usual, let' run validation:\n+\n+```bash title=\"CLI\"\n+frictionless validate tmp/capital.inquiry.yaml\n+```\n+```\n+---\n+valid: data/capital-valid.csv\n+---\n+---\n+invalid: ./data/capital-invalid.csv\n+---\n+\n+==== ===== ================ ====================================================================================================================\n+row field code message\n+==== ===== ================ ====================================================================================================================\n+None 3 duplicate-header Header \"name\" in field at position \"3\" is duplicated to header in another field: at position \"2\"\n+ 10 3 missing-cell Row at position \"10\" has a missing cell in field \"name2\" at position \"3\"\n+ 11 None blank-row Row at position \"11\" is completely blank\n+ 12 4 extra-cell Row at position \"12\" has an extra value in field at position \"4\"\n+ 12 1 type-error The cell \"x\" in row at position \"12\" and field \"id\" at position \"1\" has incompatible type: type is \"integer/default\"\n+==== ===== ================ ====================================================================================================================\n+```\n+\n+At first sight, it's no clear why such a construct exists but when your validation workflow gets complex, the Inquiry can provide a lot of flexibility and power. Last but not least, the Inquiry will use multiprocessing if there are more than 1 task provided.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/report-guide.md",
"new_path": "docs/guides/framework/report-guide.md",
"diff": "title: Report Guide\n---\n-> This guide in under development. We are moving some shared Report information from describe, extract, validate, and transform guides to this guide.\n+## Validation Report\n+\n+All the `validate` functions return the Validation Report. It's an unified object containing information about a validation: source details, found error, etc. Let's explore a report:\n+\n+```python title=\"Python\"\n+from pprint import pprint\n+from frictionless import validate\n+\n+report = validate('data/capital-invalid.csv', pick_errors=['duplicate-header'])\n+pprint(report)\n+```\n+```\n+{'errors': [],\n+ 'stats': {'errors': 1, 'tables': 1},\n+ 'tables': [{'compression': 'no',\n+ 'compressionPath': '',\n+ 'dialect': {},\n+ 'encoding': 'utf-8',\n+ 'errors': [{'cell': 'name',\n+ 'cells': ['id', 'name', 'name'],\n+ 'code': 'duplicate-header',\n+ 'description': 'Two columns in the header row have '\n+ 'the same value. Column names should '\n+ 'be unique.',\n+ 'fieldName': 'name2',\n+ 'fieldNumber': 3,\n+ 'fieldPosition': 3,\n+ 'message': 'Header \"name\" in field at position \"3\" is '\n+ 'duplicated to header in another field: at '\n+ 'position \"2\"',\n+ 'name': 'Duplicate Header',\n+ 'note': 'at position \"2\"',\n+ 'tags': ['#head', '#structure']}],\n+ 'format': 'csv',\n+ 'hashing': 'md5',\n+ 'header': ['id', 'name', 'name'],\n+ 'partial': False,\n+ 'path': 'data/capital-invalid.csv',\n+ 'query': {},\n+ 'schema': {'fields': [{'name': 'id', 'type': 'integer'},\n+ {'name': 'name', 'type': 'string'},\n+ {'name': 'name2', 'type': 'string'}]},\n+ 'scheme': 'file',\n+ 'scope': ['duplicate-header'],\n+ 'stats': {'bytes': 171,\n+ 'errors': 1,\n+ 'fields': 3,\n+ 'hash': 'dcdeae358cfd50860c18d953e021f836',\n+ 'rows': 11},\n+ 'time': 0.019,\n+ 'valid': False}],\n+ 'time': 0.019,\n+ 'valid': False,\n+ 'version': '3.38.1'}\n+```\n+\n+As we can see, there are a lot of information; you can find its details description in \"API Reference\". Errors are grouped by tables; for some validation there are can be dozens of tables. Let's use the `report.flatten` function to simplify errors representation:\n+\n+```python title=\"Python\"\n+from frictionless import validate\n+\n+report = validate('data/capital-invalid.csv', pick_errors=['duplicate-header'])\n+pprint(report.flatten(['rowPosition', 'fieldPosition', 'code', 'message']))\n+```\n+```\n+[[None,\n+ 3,\n+ 'duplicate-header',\n+ 'Header \"name\" in field at position \"3\" is duplicated to header in another '\n+ 'field: at position \"2\"']]\n+```\n+\n+In some situation, an error can't be associated with a table; then it goes to the top-level `report.errors` property:\n+\n+```python title=\"Python\"\n+from frictionless import validate_schema\n+\n+report = validate_schema('bad.json')\n+pprint(report)\n+```\n+```\n+{'errors': [{'code': 'schema-error',\n+ 'description': 'Provided schema is not valid.',\n+ 'message': 'The data source could not be successfully described '\n+ 'by the invalid Table Schema: cannot extract metadata '\n+ '\"bad.json\" because \"[Errno 2] No such file or '\n+ 'directory: \\'bad.json\\'\"',\n+ 'name': 'Schema Error',\n+ 'note': 'cannot extract metadata \"bad.json\" because \"[Errno 2] No '\n+ 'such file or directory: \\'bad.json\\'\"',\n+ 'tags': ['#table', '#schema']}],\n+ 'stats': {'errors': 1, 'tables': 0},\n+ 'tables': [],\n+ 'time': 0.0,\n+ 'valid': False,\n+ 'version': '3.38.1'}\n+```\n+\n+## Validation Errors\n+\n+The Error object is at the heart of the validation process. 
The Report has `report.errors` and `report.tables[].errors` properties that can contain the Error object. Let's explore it:\n+\n+```python title=\"Python\"\n+from frictionless import validate\n+\n+report = validate('data/capital-invalid.csv', pick_errors=['duplicate-header'])\n+error = report.table.error # it's only available for a 1 table / 1 error situation\n+print(f'Code: \"{error.code}\"')\n+print(f'Name: \"{error.name}\"')\n+print(f'Tags: \"{error.tags}\"')\n+print(f'Note: \"{error.note}\"')\n+print(f'Message: \"{error.message}\"')\n+print(f'Description: \"{error.description}\"')\n+```\n+```\n+Code: \"duplicate-header\"\n+Name: \"Duplicate Header\"\n+Tags: \"['#head', '#structure']\"\n+Note: \"at position \"2\"\"\n+Message: \"Header \"name\" in field at position \"3\" is duplicated to header in another field: at position \"2\"\"\n+Description: \"Two columns in the header row have the same value. Column names should be unique.\"\n+```\n+\n+Above, we have listed universal error properties. Depending on the type of an error there can be additional ones. For example, for our `duplicate-header` error:\n+\n+\n+```python title=\"Python\"\n+from pprint import pprint\n+from frictionless import validate\n+\n+report = validate('data/capital-invalid.csv', pick_errors=['duplicate-header'])\n+error = report.table.error # it's only available for a 1 table / 1 error situation\n+pprint(error)\n+```\n+```\n+{'cell': 'name',\n+ 'cells': ['id', 'name', 'name'],\n+ 'code': 'duplicate-header',\n+ 'description': 'Two columns in the header row have the same value. Column '\n+ 'names should be unique.',\n+ 'fieldName': 'name2',\n+ 'fieldNumber': 3,\n+ 'fieldPosition': 3,\n+ 'message': 'Header \"name\" in field at position \"3\" is duplicated to header in '\n+ 'another field: at position \"2\"',\n+ 'name': 'Duplicate Header',\n+ 'note': 'at position \"2\"',\n+ 'tags': ['#head', '#structure']}\n+```\n+\n+Please explore \"Errors Reference\" to learn about all the available errors and their properties.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Inquiry/Report guide drafts (#704) |
234,912 | 18.02.2021 15:43:06 | -10,800 | a143e5883561661420642b9535274fe0f5c46d98 | Added Pipeline/Status guide drafts | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/pipeline-guide.md",
"new_path": "docs/guides/framework/pipeline-guide.md",
"diff": "title: Pipeline Guide\n---\n-> This guide in under development. We are moving some shared Pipeline information from describe, extract, validate, and transform guides to this guide.\n+A pipeline is a metadata object having one of these types:\n+- resource\n+- package\n+- others (depending on custom plugins you use)\n+\n+## Creating Pipeline\n+\n+For resource and package types it's basically the same functionality as we have seen above but written declaratively. So let's just run the same resource transformation as we did in the `Tranforming Resource` section:\n+\n+```python title=\"Python\"\n+from pprint import pprint\n+from frictionless import Pipeline, transform, steps\n+\n+pipeline = Pipeline({\n+ 'type': 'resource',\n+ 'source': {'path': 'data/transform.csv'},\n+ 'steps': [\n+ {'code': 'table-normalize'},\n+ {'code': 'table-melt', field_name: 'name'}\n+ ]\n+})\n+```\n+\n+## Validating Pipeline\n+\n+Let's run this pipeline:\n+\n+```python title=\"Python\"\n+status = transform(pipeline)\n+pprint(status.task.target.schema)\n+pprint(status.task.target.read_rows())\n+```\n+```\n+{'fields': [{'name': 'name', 'type': 'string'},\n+ {'name': 'variable'},\n+ {'name': 'value'}]}\n+[Row([('name', 'germany'), ('variable', 'id'), ('value', 1)]),\n+ Row([('name', 'germany'), ('variable', 'population'), ('value', 83)]),\n+ Row([('name', 'france'), ('variable', 'id'), ('value', 2)]),\n+ Row([('name', 'france'), ('variable', 'population'), ('value', 66)]),\n+ Row([('name', 'spain'), ('variable', 'id'), ('value', 3)]),\n+ Row([('name', 'spain'), ('variable', 'population'), ('value', 47)])]\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/framework/status-guide.md",
"new_path": "docs/guides/framework/status-guide.md",
"diff": "title: Status Guide\n---\n-> This guide in under development. We are moving some shared Status information from describe, extract, validate, and transform guides to this guide.\n+The Status class instance is a result of a Pipeline execution.\n+\n+## Getting Status\n+\n+We need to run a pipeline to get a status:\n+\n+```python title=\"Python\"\n+from pprint import pprint\n+from frictionless import Pipeline, transform, steps\n+\n+pipeline = Pipeline({\n+ 'type': 'resource',\n+ 'source': {'path': 'data/transform.csv'},\n+ 'steps': [\n+ {'code': 'table-normalize'},\n+ {'code': 'table-melt', field_name: 'name'}\n+ ]\n+})\n+status = transform(pipeline)\n+```\n+\n+## Exploring Status\n+\n+Let's explore the execution status:\n+\n+```python title=\"Python\"\n+pprint(status.valid)\n+pprint(status.task.target.schema)\n+pprint(status.task.target.read_rows())\n+```\n+```\n+True\n+{'fields': [{'name': 'name', 'type': 'string'},\n+ {'name': 'variable'},\n+ {'name': 'value'}]}\n+[Row([('name', 'germany'), ('variable', 'id'), ('value', 1)]),\n+ Row([('name', 'germany'), ('variable', 'population'), ('value', 83)]),\n+ Row([('name', 'france'), ('variable', 'id'), ('value', 2)]),\n+ Row([('name', 'france'), ('variable', 'population'), ('value', 66)]),\n+ Row([('name', 'spain'), ('variable', 'id'), ('value', 3)]),\n+ Row([('name', 'spain'), ('variable', 'population'), ('value', 47)])]\n+```\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/transform-guide.md",
"new_path": "docs/guides/transform-guide.md",
"diff": "@@ -113,7 +113,6 @@ The exact transformation we have applied actually doesn't make any sense as we j\nA pipeline is a metadata object having one of these types:\n- resource\n- package\n-- dataflows\n- others (depending on custom plugins you use)\nFor resource and package types it's basically the same functionality as we have seen above but written declaratively. So let's just run the same resource transformation as we did in the `Tranforming Resource` section:\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added Pipeline/Status guide drafts (#705) |
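The declarative pipelines in these two guides have an imperative equivalent via the stable `transform` + `steps` API; a sketch assuming v4 and the guides' `data/transform.csv` fixture:

```python
from pprint import pprint
from frictionless import Resource, transform, steps

# Imperative equivalent of the declarative pipeline shown in the guides
target = transform(
    Resource(path="data/transform.csv"),
    steps=[steps.table_normalize(), steps.table_melt(field_name="name")],
)
pprint(target.schema)
pprint(target.read_rows())
```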
234,928 | 18.02.2021 21:31:21 | 0 | 2b6bb83c8a130eb48568ae3b9d395d88f8f12c8d | Further suggestions for edits | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/extracting-data.md",
"new_path": "docs/guides/extracting-data.md",
"diff": "@@ -74,10 +74,10 @@ pprint(rows)\nThe high-level interface for extracting data provided by Frictionless is a set of `extract` functions:\n- `extract`: detects the source type and extracts data accordingly\n-- `extract_package`: accepts a package descriptor and returns a map of the package's tables\n- `extract_resource`: accepts a resource descriptor and returns a data table\n+- `extract_package`: accepts a package descriptor and returns a map of the package's tables\n-On the command-line, there is only one command (`extract`) but there is a flag to adjust the behavior:\n+On the command-line, the command would be used as follows:\n```bash\n$ frictionless extract\n@@ -85,7 +85,7 @@ $ frictionless extract --type resource\n$ frictionless extract --type package\n```\n-The `extract` functions always read data in a form of rows (see the object description below) into memory. The lower-level interfaces will allow you to stream data and various output forms.\n+The `extract` functions always read data in the form of rows, into memory. The lower-level interfaces will allow you to stream data and various output forms.\n## Extracting a Resource\n@@ -106,14 +106,16 @@ pprint(rows)\nRow([('id', 5), ('name', 'Rome')])]\n```\n-In many cases, the code above doesn't really make sense as we can just provide a path to the high-level `extract` function. Instead, let's use the `extract_resource` function to extract the resource from a descriptor. The power of the descriptor is that it can contain different metadata and be stored on the disc. First let's create the descriptor:\n+Using the `extract_resource` function though, we can extract the resource from a descriptor. The power of the descriptor is that it can contain different metadata and be stored on the disc.\n+\n+First let's create the descriptor:\n```python\nfrom frictionless import Resource\nresource = Resource('data/capital-3.csv')\nresource.infer()\n-resource.schema.missing_values.append('3') # set 3 as a missing value\n+resource.schema.missing_values.append('3') # set 3 as a missing value for the purposes of this example\nresource.to_yaml('tmp/capital.resource.yaml')\n```\nThis description can then be used to extract the resource:\n@@ -145,11 +147,11 @@ None Paris\n==== ======\n```\n-So what has happened? We set the textual representation of the number \"3\" to be a missing value. It was done only for explanation purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used.\n+So what has happened? We set the textual representation of the number \"3\" to be a missing value. It was done only for explanation purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used. In the output we can see how the id number 3 now appears as \"None\" representing a missing value.\n## Extracting a Package\n-Let's start by using the command-line interface. We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset:\n+We're going to provide two files to the `extract` command which will be enough to detect that it's a dataset. 
Let's start by using the command-line interface:\n```bash\n$ frictionless extract data/*-3.csv\n@@ -203,6 +205,7 @@ for path, rows in data.items():\nRow([('id', 3), ('capital_id', 2), ('name', 'Germany'), ('population', 83)]),\nRow([('id', 4), ('capital_id', 5), ('name', 'Italy'), ('population', 60)]),\nRow([('id', 5), ('capital_id', 4), ('name', 'Spain'), ('population', 47)])]\n+\n'data/capital-3.csv'\n[Row([('id', 1), ('name', 'London')]),\nRow([('id', 2), ('name', 'Berlin')]),\n@@ -213,6 +216,8 @@ for path, rows in data.items():\nWe can also extract the package from a descriptor using the `extract_package` function:\n```python\n+from frictionless import extract_package\n+\npackage = extract_package('tmp/country.package.yaml')\npprint(package)\n@@ -287,6 +292,7 @@ The Package class provides functions to read the contents of a package. First of\n```bash\n$ frictionless describe data/*-3.csv --json > tmp/country.package.json\n```\n+Note that --json is used here to output the descriptor in JSON format. Without this, the default output is in YAML format as we saw above.\nNow, we can open the descriptor and read the package's resources:\n@@ -315,7 +321,7 @@ The package by itself doesn't provide any read functions directly because that i\n## Header Class\n-After opening a resource you get access to a `resource.header` object. This is a list of normalized labels but also provides some additional functionality. Let's take a look:\n+After opening a resource you get access to a `resource.header` object which describes the resource in more detail. This is a list of normalized labels but also provides some additional functionality. Let's take a look:\n```python\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Further suggestions for edits |
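The reordered extraction guide distinguishes `extract` from `extract_resource`; side by side, a minimal sketch (the descriptor path is the one created earlier in the guide):

```python
from frictionless import extract, extract_resource

# High-level: the source type is detected automatically
rows = extract("data/capital-3.csv")

# Explicit: extract from a saved resource descriptor
rows = extract_resource("tmp/capital.resource.yaml")
print(rows[:2])
```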
234,904 | 24.02.2021 04:36:10 | 10,800 | 8eeb2e072384422db87df14a016738b36dc472aa | Minor improvements to the extraction guide | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/extracting-data.md",
"new_path": "docs/guides/extracting-data.md",
"diff": "@@ -111,7 +111,7 @@ from frictionless import Resource\nresource = Resource('data/capital-3.csv')\nresource.infer()\n-resource.schema.missing_values.append('3')\n+resource.schema.missing_values.append('3') # will interpret 3 as a missing value\nresource.to_yaml('tmp/capital.resource.yaml')\n```\n@@ -134,7 +134,7 @@ None Paris\n==== ======\n```\n-So what's happened? We set the textual representation of the number \"3\" to be a missing value. It was done only for presentational purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used.\n+So what's happened? We set the textual representation of the number \"3\" to be a missing value. It was done only for illustrational purposes because it's definitely not a missing value. On the other hand, it demonstrated how metadata can be used.\n## Extracting Package\n@@ -200,7 +200,7 @@ for path, rows in data.items():\n## Resource Class\n-The Resource class is also a metadata class which provides various read and stream functions. The `extract` functions always read rows into memory; Resource can do the same but it also gives a choice regarding output data. It can be `rows`, `data`, `text`, or `bytes`. Let's try reading all of them:\n+The Resource class is also a metadata class which provides various read and stream functions. The `extract` functions always reads rows into memory; Resource can do the same but it also gives a choice regarding output data. It can be `rows`, `data`, `text`, or `bytes`. Let's try reading all of them:\n```python title=\"Python\"\nfrom frictionless import Resource\n@@ -267,7 +267,7 @@ The Package class is a metadata class which provides an ability to read its cont\nfrictionless describe data/*-3.csv --json > tmp/country.package.json\n```\n-Now, we can open the created descriptor and read the package's resources:\n+We can also create a descriptor with the Package class in Python and read the package's resources:\n```python title=\"Python\"\nfrom frictionless import Package\n@@ -320,7 +320,7 @@ with Resource('data/capital-3.csv') as resource:\nAs List: ['id', 'name']\n-The example above covers the case when a header is valid. For a header with tabular errors this information can be much more useful revealing discrepancies, duplicates or missing cells information. Please read \"API Reference\" for more details.\n+The example above covers the case when a header is valid. For a header with tabular errors this information can be much more useful revealing discrepancies, duplicates or missing cells information. 
Please read \"[API Reference](/docs/references/api-reference/)\" for more details.\n## Row Class\n@@ -337,6 +337,7 @@ with Resource('data/capital-3.csv', detector=detector) as resource:\nprint(f'Fields: {row.fields}')\nprint(f'Field Names: {row.field_names}')\nprint(f'Field Positions: {row.field_positions}')\n+ print(f'Value of field \"name\": {row[\"name\"]}') # accessed as a dict\nprint(f'Row Position: {row.row_position}') # physical line number starting from 1\nprint(f'Row Number: {row.row_number}') # counted row number starting from 1\nprint(f'Blank Cells: {row.blank_cells}')\n@@ -349,10 +350,11 @@ with Resource('data/capital-3.csv', detector=detector) as resource:\n```\n```\nRow: Row([('id', None), ('name', 'London')])\n-Cells: ['1', 'londong']\n+Cells: ['1', 'London']\nFields: [{'name': 'id', 'type': 'integer'}, {'name': 'name', 'type': 'string'}]\nField Names: ['id', 'name']\nField Positions: [1, 2]\n+Value of field \"name\": London\nRow Position: 2\nRow Number: 1\nBlank Cells: {'id': '1'}\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Minor improvements to the extraction guide (#707) |
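The `row["name"]` line added in this commit highlights that Row behaves like a dict while also exposing positional metadata; a short sketch of common access patterns:

```python
from frictionless import Resource

with Resource("data/capital-3.csv") as resource:
    for row in resource.row_stream:
        # Dict-style cell access plus row-level metadata
        print(row["name"], row.row_number, row.valid)
        print(row.to_dict())  # plain dict for downstream use
        break
```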
234,912 | 24.02.2021 12:18:07 | -10,800 | a093290a0a4677d5b7c6a5fc94a4bca8b06d579e | Improved the migration document | [
{
"change_type": "MODIFY",
"old_path": "docs/development/migration.md",
"new_path": "docs/development/migration.md",
"diff": "title: Migration\n---\n-> Frictionless Framework requires Python3.6+\n+Frictionless is a logical continuation of many existing packages created for Frictionless Data as though `datapackage` or `tableschema`. Although, most of these packages will be supported going forward, you can migrate to Frictionless, which is Python 3.6+, as it improves many aspects of working with data and metadata. This document also covers migration from one framework's version to another.\n-Frictionless is a logical continuation of many currently existing packages:\n-- goodtables\n-- datapackage\n-- tableschema\n-- tableschema-drivers\n-- tabulator\n+## From v3 to v4\n-Although, most of these packages will be supported going forward, you can migrate to Frictionless as it improves many aspects of working with data and metadata.\n+Version 3 of the Frictionless Framework was experimental/unstable and active for 6 months. We improved the framework dramatically during this period of time including various minor API changes and one major API change - `Table` class has been merged into the `Resource` class. Please read the updated documentation to migrate to version 4:\n+- [Guides Overview](../guides/guides-overview.md)\n+- [Tutorials Overview](../tutorials/tutorials-overview.md)\n+- [References Overview](../references/references-overview.md)\n-## From frictionless@3\n+## From dataflows\n-> This section in under development. Please consult with [CHANGELOG](../development/changelog.md) while it's being completed.\n-\n-### Table\n-\n-The main change in v4 is that `Table` is now merged into the `Resource` class. You can use `Resource` mostly the same way you used `Table` previously.\n-\n-```python\n-# Before\n-with Table('data/table.csv') as table:\n- # use table\n-# After\n-with Resource('data/table.csv') as resource:\n- # use resource\n-```\n+Frictionless Framework provides the `frictionless transform` function for data transformation. It can be used to migrate from `dataflows` or `datapackage-pipelines`:\n+- [Transform Guide](../guides/transform-guide.md)\n+- [Transform Steps](../guides/transform-steps.md)\n## From goodtables\n-Frictionless provides the `frictionless validate` function which is in high-level exactly the same as `goodtables validate`. Also `frictionless describe` is an improved version of `goodtables init`. 
You instead need to use the `frictionless` command instead of the `goodtables` command:\n-\n-```bash\n-# Before\n-$ goodtables validate table.csv\n-# After\n-$ frictionless validate table.csv\n-```\n-\n-The Python interface is also mostly identical:\n-\n-```python\n-# Before\n-report = goodtables.validate('table.csv')\n-# After\n-report = frictionless.validate('table.csv')\n-```\n-\n-Please read the following sections and use `frictionless validate --help` to learn what is the difference in the options and in report's properties.\n-\n-### Validate\n-\n-- a schema is inferred by default (use \"Infer Options\" and \"Schema Options\" to manage)\n-- `order_fields` was replaced by `sync_schema` (see \"Schema Options\")\n-- `checks` was replaced by `pick/skip_errors` and `extra_checks`\n-- `error_limit` was replaced by `limit_errors` (see \"Errors Options\")\n-- `row_limit` was replaced by `query` (see \"Table Query)\n-- `preset` was replaced by `source_type`\n-\n-### Report\n-\n-- all the properties now are camelCased instead of being lower-cased\n-- various error code changes (see \"Errors Reference\")\n-- errors now have both row position and row number\n-- `row-number` was replaced by `rowPosition`\n-- high-level `warnings` was replaced by `errors`\n+Frictionless Framework provides the `frictionless validate` function which is, at a high level, exactly the same as `goodtables validate`. Also `frictionless describe` is an improved version of `goodtables init`. You just need to use the `frictionless` command instead of the `goodtables` command:\n+- [Validation Guide](../guides/validation-guide.md)\n+- [Validation Checks](../guides/validation-checks.md)\n## From datapackage\n-Frictionless has `Resource` and `Package` classes which is almost the same as `datapackage` has. There are a lot of improvements for working with metadata described in the \"Describing Data\" guide.\n-\n-```python\n-# Before\n-resource = datapackage.Resource('resource.json')\n-package = datapackage.Package('package.json')\n-# After\n-resource = frictionless.Resource('resource.json')\n-package = frictionless.Package('package.json')\n-```\n-\n-### Package\n-\n-- added YAML support\n-- the Package object is now a dict\n-- there is no `package.descriptor` anymore\n-- it's now possible to use keyword arguments in the constructor\n-- it's now possible to use attribute setters to update a package\n-- `package.save` is replaced by `package.to_json`\n-\n-### Resource\n-\n-- added YAML support\n-- the Resource object is now a dict\n-- there is no `resource.descriptor` anymore\n-- it's now possible to use keyword arguments in the constructor\n-- it's now possible to use attribute setters to update a resource\n-- `resource.save` is replaced by `**resource**.to_json`\n-- `resource.read` was replaced by `resource.read_data/rows`\n-- `resource.iter` was replaced by `resource.stream_data/rows`\n-- `resource.raw_read` was replaced by `resource.read_bytes`\n-- `resource.raw_iter` was replaced by `resource.stream_bytes`\n+Frictionless Framework has `Package` and `Resource` classes which are almost the same as the ones `datapackage` has:\n+- [Package Guide](../guides/framework/package-guide.md)\n+- [Resource Guide](../guides/framework/resource-guide.md)\n## From tableschema\n-Frictionless has `Schema` and `Fields` classes which is almost the same as `tableschema` has. 
There are a lot of improvements for working with metadata described in the \"Describing Data\" guide.\n-\n-```python\n-# Before\n-schema = tableschema.Schema('schema.json')\n-field = tableschema.Field('field.json')\n-# After\n-schema = frictionless.Schema('schema.json')\n-field = frictionless.Field('field.json')\n-```\n-\n-### Schema\n-\n-- added YAML support\n-- the Package object is now a dict\n-- there is no `schema.descriptor` anymore\n-- it's now possible to use keyword arguments in the constructor\n-- it's now possible to use attribute setters to update a schema\n-- `schema.save` is replaced by `schema.to_json`\n-- `schema.cast_row` is replaced by `schema.read_data`\n-\n-### Field\n-\n-- added YAML support\n-- the Resource object is now a dict\n-- there is no `resource.descriptor` anymore\n-- it's now possible to use keyword arguments in the constructor\n-- it's now possible to use attribute setters to update a resource\n-- `field.save` is replaced by `field.to_json`\n-- `field.cast_value` is replaced by `field.read_cell`\n+Frictionless Framework has `Schema` and `Field` classes which are almost the same as the ones `tableschema` has:\n+- [Schema Guide](../guides/framework/schema-guide.md)\n+- [Field Guide](../guides/framework/field-guide.md)\n## From tabulator\n-Frictionless has `Table` class which is an equivalent of the tabulator's `Stream` class.\n-\n-```python\n-# Before\n-with tabulator.Stream('table.csv') as stream:\n- print(stream.read())\n-# After\n-with frictionless.Table('table.csv') as table:\n- print(table.read_rows())\n-```\n-\n-### Table\n-\n-- the Table class now always infers `table.schema`\n-- `table.read` was replace by `table.read_data/rows`\n-- `table.iter` was replaced by `table.data/row_stream`\n-- `table.hash/size` was replaced by the `table.stats` property\n-- various changes in the constructor options (see \"Extracting Data\")\n+Frictionless has the `Resource` class which is an equivalent of tabulator's `Stream` class:\n+- [Resource Guide](../guides/framework/resource-guide.md)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improved the migration document (#713) |
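Since the rewritten migration page drops the code comparisons, here is the tabulator-to-frictionless pattern the old page used to show, kept as a hedged sketch (v4, where Resource replaces both tabulator's Stream and the old Table class):

```python
# Before (tabulator):
#     with tabulator.Stream("table.csv") as stream:
#         print(stream.read())

# After (frictionless v4):
from frictionless import Resource

with Resource("table.csv") as resource:
    print(resource.read_rows())
```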
234,912 | 01.03.2021 12:10:34 | -10,800 | 81dadeb0f7a069d40cd00324163cc9711f2aa394 | Fixed describing non-tabular html | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/text.html",
"diff": "+<!DOCTYPE html>\n+<html>\n+<head>\n+ <meta charset=\"UTF-8\">\n+</head>\n+<body>\n+text\n+</body>\n+</html\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/html.py",
"new_path": "frictionless/plugins/html.py",
"diff": "@@ -100,16 +100,13 @@ class HtmlParser(Parser):\ndef read_list_stream_create(self):\npq = helpers.import_from_plugin(\"pyquery\", plugin=\"html\").PyQuery\n- dialect = self.resource.dialect\n- # Get Page content\n+ # Get table\npage = pq(self.loader.text_stream.read(), parser=\"html\")\n-\n- # Find required table\n- if dialect.selector:\n- table = pq(page.find(dialect.selector)[0])\n- else:\n- table = page\n+ tables = page.find(self.resource.dialect.selector)\n+ table = pq(tables[0]) if tables else None\n+ if not table:\n+ return\n# Stream headers\ndata = (\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -691,10 +691,12 @@ class Resource(Metadata):\nstats? (bool): stream file completely and infer stats\n\"\"\"\nif not self.closed:\n- raise FrictionlessException(\"Resource.infer canot be used on a open resource\")\n+ note = \"Resource.infer canot be used on a open resource\"\n+ raise FrictionlessException(errors.ResourceError(note=note))\nwith self:\nif not stats:\n- return self.pop(\"stats\", None)\n+ self.pop(\"stats\", None)\n+ return\nstream = self.row_stream or self.byte_stream\nhelpers.pass_through(stream)\n@@ -1023,7 +1025,10 @@ class Resource(Metadata):\nself.__fragment = fragment\nself.__field_positions = field_positions\nself.__fragment_positions = fragment_positions\n- self.stats[\"fields\"] = len(self.schema.fields)\n+ self.stats[\"fields\"] = len(schema.fields)\n+ # NOTE: review whether it's a proper place for this fallback to data resource\n+ if not schema:\n+ self.profile = \"data-resource\"\ndef __read_detect_lookup(self):\nlookup = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/describe/test_main.py",
"new_path": "tests/describe/test_main.py",
"diff": "@@ -126,3 +126,16 @@ def test_describe_non_tabular_resource_issue_641():\n\"bytes\": 262443,\n},\n}\n+\n+\n+def test_describe_non_tabular_html_issue_715():\n+ resource = describe(\"data/text.html\")\n+ assert resource == {\n+ \"path\": \"data/text.html\",\n+ \"name\": \"text\",\n+ \"profile\": \"data-resource\",\n+ \"scheme\": \"file\",\n+ \"format\": \"html\",\n+ \"hashing\": \"md5\",\n+ \"encoding\": \"utf-8\",\n+ }\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/plugins/test_html.py",
"new_path": "tests/plugins/test_html.py",
"diff": "@@ -12,7 +12,6 @@ from frictionless.plugins.html import HtmlDialect\n(\"data/table1.html\", \"table\"),\n(\"data/table2.html\", \"table\"),\n(\"data/table3.html\", \".mememe\"),\n- (\"data/table4.html\", \"\"),\n],\n)\ndef test_html_parser(source, selector):\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed describing non-tabular html (#723) |
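Editor's note: a usage sketch of the behavior this commit fixes, mirroring the added test (paths assume the repository's `data` folder and the `html` extra being installed). Describing an HTML page without a `<table>` now falls back to a plain data resource instead of failing:

```python
from frictionless import describe

# With no table in the page, the HTML parser yields nothing and the
# resource degrades to the generic "data-resource" profile.
resource = describe("data/text.html")
print(resource.profile)  # data-resource
```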
234,912 | 01.03.2021 16:02:55 | -10,800 | 07eb38975d512463b00464ed58aab19f1e4fbf99 | Fixed schema/inquiry validation from CLI | [
{
"change_type": "MODIFY",
"old_path": "frictionless/validate/inquiry.py",
"new_path": "frictionless/validate/inquiry.py",
"diff": "@@ -18,8 +18,6 @@ def validate_inquiry(source, *, parallel=False, **options):\nReport: validation report\n\"\"\"\n- # TODO: remove this quicfix\n- options.pop(\"detector\", None)\nnative = isinstance(source, Inquiry)\ninquiry = source.to_copy() if native else Inquiry(source, **options)\nreturn inquiry.run(parallel=parallel)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/validate/main.py",
"new_path": "frictionless/validate/main.py",
"diff": "@@ -30,4 +30,9 @@ def validate(source, type=None, **options):\nif validate is None:\nnote = f\"Not supported validate type: {type}\"\nraise FrictionlessException(errors.GeneralError(note=note))\n+ # NOTE:\n+ # Review whether it's a proper place for this (program sends a detector)\n+ # We might resolve it when we convert Detector to be a metadata\n+ if type in [\"inquiry\", \"schema\"]:\n+ options.pop(\"detector\", None)\nreturn validate(source, **options)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/validate/schema.py",
"new_path": "frictionless/validate/schema.py",
"diff": "@@ -25,8 +25,6 @@ def validate_schema(source, **options):\n# Create schema\ntry:\n- # TODO: remove this quickfix\n- options.pop(\"detector\", None)\nnative = isinstance(source, Schema)\nschema = source.to_copy() if native else Schema(source, **options)\nexcept FrictionlessException as exception:\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed schema/inquiry validation from CLI (#725) |
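Editor's note: a rough Python equivalent of the schema case this commit repairs, using a deliberately broken descriptor (the invalid `fields` value is illustrative):

```python
from frictionless import validate

# "fields" must be a list of field descriptors, so this schema is invalid
report = validate({"fields": {}}, type="schema")
print(report.valid)  # False
print(report.flatten(["code", "note"]))  # a schema-error with a note
```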
234,912 | 01.03.2021 18:10:59 | -10,800 | cefa453150b23f58c0201f2b15fbbf9feca9858e | Added lxml dependency to ods | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -45,7 +45,7 @@ EXTRAS_REQUIRE = {\n\"gsheets\": [\"pygsheets>=2.0\"],\n\"html\": [\"pyquery>=1.4\"],\n\"json\": [\"ijson>=3.0\", \"jsonlines>=1.2\"],\n- \"ods\": [\"ezodf>=0.3\"],\n+ \"ods\": [\"ezodf>=0.3\", \"lxml>=4.0\"],\n\"pandas\": [\"pandas>=1.0\"],\n\"s3\": [\"boto3>=1.9\"],\n\"server\": [\"gunicorn>=20.0\", \"flask>=1.1\"],\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added lxml dependency to ods (#727) |
234,898 | 01.03.2021 18:19:05 | -3,600 | 83c97213aa8beaea8f9f3bc74bc48fc6104f71f5 | Consistently use period after last sentence in blockquotes | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -78,7 +78,7 @@ Table Schema is a specification for providing a \"schema\" (similar to a database\nWe're going to use this file for the examples in this section. For this guide, we only use CSV files because of their demonstrativeness, but in-general Frictionless can handle data in Excel, JSON, SQL, and many other formats:\n-> Download [`country-1.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-1.csv) into the `data` folder to reproduce the examples\n+> Download [`country-1.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-1.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/country-1.csv\n@@ -186,7 +186,7 @@ A range of other properties can be declared to provide a richer set of metadata.\nFor this section, we will use a file that is slightly more complex to handle. For some reason, cells are separated by the \";\" char and there is a comment on the top:\n-> Download [`country-2.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-2.csv) into the `data` folder to reproduce the examples\n+> Download [`country-2.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-2.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/country-2.csv\n@@ -322,7 +322,7 @@ The data included in the package may be provided as:\nFor this section, we will use the following files:\n-> Download [`country-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-3.csv) into the `data` folder to reproduce the examples\n+> Download [`country-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-3.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/country-3.csv\n@@ -336,7 +336,7 @@ id,capital_id,name,population\n5,4,Spain,47\n```\n-> Download [`capital-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-3.csv) into the `data` folder to reproduce the examples\n+> Download [`capital-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-3.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/capital-3.csv\n@@ -572,7 +572,7 @@ pprint(resource)\nFrictionless always tries to be as explicit as possible. We didn't provide any metadata except for `path` so we got the expected result. But now, we'd like to `infer` additional metadata:\n-> Note that we use the `stats` argument for the `resource.infer` function. We can ask for stats using CLI with `frictionless describe data/table.csv --stats`\n+> Note that we use the `stats` argument for the `resource.infer` function. We can ask for stats using CLI with `frictionless describe data/table.csv --stats`.\n```python title=\"Python\"\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/extension/format-guide.md",
"new_path": "docs/guides/extension/format-guide.md",
"diff": "@@ -10,7 +10,7 @@ The Parser is responsible for parsing data from/to different data sources as tho\n## Parser Example\n-> This parser has quite a naive experimental implementation\n+> This parser has quite a naive experimental implementation.\n```python title=\"Python\"\nclass HtmlParser(Parser):\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/extension/step-guide.md",
"new_path": "docs/guides/extension/step-guide.md",
"diff": "@@ -6,7 +6,7 @@ The Step concept is a part of the Transform API. You can create a custom Step to\n## Step Example\n-> This step uses PETL under the hood\n+> This step uses PETL under the hood.\n```python title=\"Python\"\nclass cell_set(Step):\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/extracting-data.md",
"new_path": "docs/guides/extracting-data.md",
"diff": "@@ -4,7 +4,7 @@ title: Extracting Data\nExtracting data means reading tabular data from a source. We can use various customizations for this process such as providing a file format, table schema, limiting fields or rows amount, and much more. Let's see this with some real files:\n-> Download [`country-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-3.csv) into the `data` folder to reproduce the examples\n+> Download [`country-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-3.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/country-3.csv\n@@ -17,7 +17,7 @@ id,capital_id,name,population\n4,5,Italy,60\n5,4,Spain,47\n```\n-> Download [`capital-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-3.csv) into the `data` folder to reproduce the examples\n+> Download [`capital-3.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-3.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/capital-3.csv\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/quick-start.md",
"new_path": "docs/guides/quick-start.md",
"diff": "@@ -6,7 +6,7 @@ Let's get started with Frictionless! We will learn how to install and use the fr\n## Installation\n-> The framework requires Python3.6+. Versioning follows the [SemVer Standard](https://semver.org/)\n+> The framework requires Python3.6+. Versioning follows the [SemVer Standard](https://semver.org/).\n```bash title=\"CLI\"\npip install frictionless\n@@ -65,11 +65,11 @@ frictionless transform --help\n## Example\n-> For more examples, use the [Basic Examples](basic-examples.md)\n+> For more examples, use the [Basic Examples](basic-examples.md).\nWe will take a very messy data file:\n-> Download [`invalid.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/invalid.csv) into the `data` folder to reproduce the examples\n+> Download [`invalid.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/invalid.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/invalid.csv\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/transform-guide.md",
"new_path": "docs/guides/transform-guide.md",
"diff": "title: Transform Guide\n---\n-> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start)\n+> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start).\nTransforming data in Frictionless means modifying data and metadata from state A to state B. For example, it could be a messy Excel file we need to transform to a cleaned CSV file or a folder of data files we want to update and save as a data package.\n@@ -14,7 +14,7 @@ Frictionless supports a few different kinds of data and metadata transformations\nThe main difference between these three is that resource and package transforms are imperative while pipelines can be created beforehand or shared as a JSON file.\n-> Download [`transform.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform.csv) into the `data` folder to reproduce the examples\n+> Download [`transform.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/transform.csv\n@@ -75,7 +75,7 @@ There are dozens of other available steps that will be covered below.\nTransforming a package is not much more difficult than a resource. Basically, a package is a set of resources so we will be transforming resources exactly the same way as we did above and we will be managing the resources list itself, adding or removing them:\n-> NOTE: This example is about to be fixed in https://github.com/frictionlessdata/frictionless-py/issues/715\n+> NOTE: This example is about to be fixed in https://github.com/frictionlessdata/frictionless-py/issues/715.\n```python title=\"Python\"\nfrom pprint import pprint\n@@ -194,7 +194,7 @@ See [Transform Steps](transform-steps.md) for a list of available steps.\nHere is an example of a custom step written as a Python function:\n-> NOTE: This example is about to be fixed in https://github.com/frictionlessdata/frictionless-py/issues/715\n+> NOTE: This example is about to be fixed in https://github.com/frictionlessdata/frictionless-py/issues/715.\n```python title=\"Python\"\nfrom pprint import pprint\n@@ -224,7 +224,7 @@ Learn more about custom steps in the [Step Guide](extension/step-guide.md).\n## Transform Utils\n-> Transform Utils is under construction\n+> Transform Utils is under construction.\n## Working with PETL\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/transform-steps.md",
"new_path": "docs/guides/transform-steps.md",
"diff": "title: Transform Steps\n---\n-> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start)\n+> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start).\nFrictionless includes more than 40+ built-in transform steps. They are grouped by the object so you can find them easily if you have code auto completion. Start typing, for example, `steps.table...` and you will see all the available steps. The groups are listed below and you will find every group described in more detail in the next sections. It's also possible to write custom transform steps. Please read the section below to learn more about it. Let's prepare the data that we need to show how the checks below work:\n-> Download [`transform.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform.csv) into the `data` folder to reproduce the examples\n+> Download [`transform.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/transform.csv\n@@ -18,7 +18,7 @@ id,name,population\n3,spain,47\n```\n-> Download [`transform-groups.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform-groups.csv) into the `data` folder to reproduce the examples\n+> Download [`transform-groups.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform-groups.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/transform-groups.csv\n@@ -33,7 +33,7 @@ id,name,population,year\n6,spain,33,1920\n```\n-> Download [`transform-pivot.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform-pivot.csv) into the `data` folder to reproduce the examples\n+> Download [`transform-pivot.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/transform-pivot.csv) into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/transform-pivot.csv\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-checks.md",
"new_path": "docs/guides/validation-checks.md",
"diff": "title: Validation Checks\n---\n-> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start)\n+> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start).\nThere are various validation checks included in the core Frictionless Framework along with an ability to create custom checks. Let's review what's in the box.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-guide.md",
"new_path": "docs/guides/validation-guide.md",
"diff": "title: Validation Guide\n---\n-> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start)\n+> This guide assumes basic familiarity with the Frictionless Framework. To learn more, please read the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction) and [Quick Start](https://framework.frictionlessdata.io/docs/guides/quick-start).\nTabular data validation is a process of identifying tabular problems that have occured in your data so you can correct them. Let's explore how Frictionless helps to achieve this task using an invalid data table example:\n-> Download [`capital-invalid.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-invalid.csv) and put into the `data` folder to reproduce the examples\n+> Download [`capital-invalid.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-invalid.csv) and put into the `data` folder to reproduce the examples.\n```bash title=\"CLI\"\ncat data/capital-invalid.csv\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Consistently use period after last sentence in blockquotes (#728) |
234,912 | 02.03.2021 18:44:23 | -10,800 | 1c767b93e8e2b66d50d9b9c8383b1a803841e5a5 | Fixed stream tutorial | [
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/schemes/stream-tutorial.md",
"new_path": "docs/tutorials/schemes/stream-tutorial.md",
"diff": "@@ -14,7 +14,7 @@ You can read Stream using `Package/Resource` or `Table` API, for example:\nfrom frictionless import Resource\nwith open('data/table.csv', 'rb') as file:\n- resource = Resource(path=file, format='csv')\n+ resource = Resource(file, format='csv')\nprint(resource.read_rows())\n```\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed stream tutorial (#737) |
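Editor's note: a self-contained version of the corrected tutorial snippet, assuming a local `data/table.csv`. The file object is passed positionally, and the format is given explicitly since it cannot be guessed from a stream:

```python
from frictionless import Resource

with open('data/table.csv', 'rb') as file:
    resource = Resource(file, format='csv')
    print(resource.read_rows())
```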
234,907 | 05.03.2021 22:20:28 | 0 | 59aabac6592c5e82a3c037fa5541f0c903583f98 | Update validation-guide.md
Just correcting a typo and smoothing the English in the headings a little bit | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-guide.md",
"new_path": "docs/guides/validation-guide.md",
"diff": "@@ -66,7 +66,7 @@ frictionless validate --type package\nfrictionless validate --type inquiry\n```\n-## Validating Schema\n+## Validating a Schema\nThe `validate_schema` function is the only function validating solely metadata. To see this work, let's create an invalid table schema:\n@@ -97,7 +97,7 @@ schema-error The data source could not be successfully described by the invalid\nWe see that the schema is invalid and the error is displayed. Schema validation can be very useful when you work with different classes of tables and create schemas for them. Using this function will ensure that the metadata is valid.\n-## Validating Resource\n+## Validating a Resource\nAs was shown in the [\"Describing Data\" guide](https://framework.frictionlessdata.io/docs/guides/describing-data), a resource is a container having both metadata and data. We need to create a resource descriptor and then we can validate it:\n@@ -161,7 +161,7 @@ None None byte-count-error The data source does not match the expected byte\n```\n-## Validating Package\n+## Validating a Package\nA package is a set of resources + additional metadata. To showcase a package validation we need to use one more tabular file:\n@@ -206,7 +206,7 @@ valid: ./data/capital-valid.csv\nAs we can see, the result is in a similar format to what we have already seen, and shows errors as we expected: we have one invalid resource and one valid resource. One important note regarding the package validation: if there is more than one resource, it will use multiprocessing to speed up the process.\n-## Validating Inquiry\n+## Validating an Inquiry\nInquiry gives you an ability to create arbitrary validation jobs containing a set of individual validation tasks. Tasks in the Inquiry accept the same arguments written in camelCase as the corresponding `validate` functions.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update validation-guide.md (#747)
Just correcting a typo and smoothing the English in the headings a little bit |
234,923 | 12.03.2021 17:49:26 | 10,800 | 98b1fe8598421c446a30380d4fa8c23947785a00 | add link to download 'capital.valid.csv' | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-guide.md",
"new_path": "docs/guides/validation-guide.md",
"diff": "@@ -165,6 +165,8 @@ None None byte-count-error The data source does not match the expected byte\nA package is a set of resources + additional metadata. To showcase a package validation we need to use one more tabular file:\n+> Download [``capital-valid.csv``](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/capital-valid.csv) and put into the `data` folder to reproduce the examples.\n+\n```bash title=\"CLI\"\ncat data/capital-valid.csv\n```\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | add link to download 'capital.valid.csv' (#730) |
234,887 | 14.03.2021 08:08:50 | 0 | c2e99309f20c526c19b517d09a112af68167513d | Remove url param from sql docs
Since `url` is no longer a named param | [
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/formats/sql-tutorial.md",
"new_path": "docs/tutorials/formats/sql-tutorial.md",
"diff": "@@ -36,7 +36,7 @@ You can write SQL databases:\nfrom frictionless import Package\npackage = Package('path/to/datapackage.json')\n-package.to_sql(url='postgresql://mydatabase')\n+package.to_sql('postgresql://mydatabase')\n```\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Remove url param from sql docs (#753)
Since https://github.com/frictionlessdata/frictionless-py/pull/655 `url` is no longer a named param |
234,884 | 21.03.2021 08:26:35 | -3,600 | 47b98949956dce915c0cd1b2427f8fe2aac4cacd | Fix csv write ignoring dialect options like line_terminator | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/csv.py",
"new_path": "frictionless/plugins/csv.py",
"diff": "@@ -243,9 +243,8 @@ class CsvParser(Parser):\noptions = {}\nsource = resource\ntarget = self.resource\n- for name in vars(target.dialect.to_python()):\n- value = getattr(target.dialect, name, None)\n- if value is not None:\n+ for name, value in vars(target.dialect.to_python()).items():\n+ if not name.startswith(\"_\") and value is not None:\noptions[name] = value\nwith tempfile.NamedTemporaryFile(\n\"wt\", delete=False, encoding=target.encoding, newline=\"\"\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fix csv write ignoring dialect options like line_terminator (#765)
Co-authored-by: Florian Fritz <[email protected]> |
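Editor's note: with this fix, non-default dialect options survive the write path. A minimal sketch, assuming the v4 `CsvDialect` API and an existing `data/table.csv` (the output path is illustrative):

```python
from frictionless import Resource
from frictionless.plugins.csv import CsvDialect

# Before the fix, options like line_terminator were silently dropped
dialect = CsvDialect(delimiter=";", line_terminator="\r\n")
source = Resource("data/table.csv")
source.write("output.csv", dialect=dialect)
```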
234,912 | 23.03.2021 14:32:28 | -10,800 | 5c538bbd179494584ec73dcff57b66965afd870b | Fixed field.constraints type | [
{
"change_type": "MODIFY",
"old_path": "frictionless/field.py",
"new_path": "frictionless/field.py",
"diff": "@@ -146,6 +146,7 @@ class Field(Metadata):\ndict: constraints\n\"\"\"\nconstraints = self.get(\"constraints\", {})\n+ constraints = constraints if isinstance(constraints, dict) else {}\nreturn self.metadata_attach(\"constraints\", constraints)\[email protected]\n@@ -417,7 +418,7 @@ class Field(Metadata):\nfor name in self.constraints.keys():\nif name not in self.__type.constraints + [\"unique\"]:\nnote = f'constraint \"{name}\" is not supported by type \"{self.type}\"'\n- yield errors.SchemaError(note=note)\n+ yield errors.FieldError(note=note)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_field.py",
"new_path": "tests/test_field.py",
"diff": "@@ -67,22 +67,22 @@ def test_field_standard_specs_properties(create_descriptor):\ntype=\"string\",\nformat=\"format\",\nmissing_values=\"missing\",\n- constraints=[],\n+ constraints={},\nrdf_type=\"rdf\",\n)\n- resource = (\n+ field = (\nField(**options)\nif not create_descriptor\nelse Field(helpers.create_descriptor(**options))\n)\n- assert resource.name == \"name\"\n- assert resource.title == \"title\"\n- assert resource.description == \"description\"\n- assert resource.type == \"string\"\n- assert resource.format == \"format\"\n- assert resource.missing_values == \"missing\"\n- assert resource.constraints == []\n- assert resource.rdf_type == \"rdf\"\n+ assert field.name == \"name\"\n+ assert field.title == \"title\"\n+ assert field.description == \"description\"\n+ assert field.type == \"string\"\n+ assert field.format == \"format\"\n+ assert field.missing_values == \"missing\"\n+ assert field.constraints == {}\n+ assert field.rdf_type == \"rdf\"\n# Constraints\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed field.constraints type (#770) |
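Editor's note: typical well-formed usage of the property this commit hardens. `constraints` is a dict keyed by constraint name, and violations come back in the notes returned by `read_cell`:

```python
from frictionless import Field

field = Field(name="age", type="integer", constraints={"minimum": 0})
cell, notes = field.read_cell("-1")
print(cell)   # -1
print(notes)  # notes flag the violated "minimum" constraint
```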
234,912 | 26.03.2021 18:08:34 | -10,800 | f63b8806a36ced0a33de802d366618a86f6df05e | Implemented resource.to_view() | [
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -1077,6 +1077,11 @@ class Resource(Metadata):\n# Import/Export\ndef to_dict(self):\n+ \"\"\"Create a dict from the resource\n+\n+ Returns\n+ dict: dict representation\n+ \"\"\"\n# Data can be not serializable (generators/functions)\ndescriptor = super().to_dict()\ndata = descriptor.pop(\"data\", None)\n@@ -1085,7 +1090,11 @@ class Resource(Metadata):\nreturn descriptor\ndef to_copy(self, **options):\n- \"\"\"Create a copy of the resource\"\"\"\n+ \"\"\"Create a copy from the resource\n+\n+ Returns\n+ Resource: resource copy\n+ \"\"\"\ndescriptor = self.to_dict()\nreturn Resource(\ndescriptor,\n@@ -1097,7 +1106,28 @@ class Resource(Metadata):\n**options,\n)\n- def to_snapshot(self):\n+ def to_view(self, type=\"look\", **options):\n+ \"\"\"Create a view from the resource\n+\n+ See PETL's docs for more information:\n+ https://petl.readthedocs.io/en/stable/util.html#visualising-tables\n+\n+ Parameters:\n+ type (look|lookall|see|display|displayall): view's type\n+ **options (dict): options to be passed to PETL\n+\n+ Returns\n+ str: resource's view\n+ \"\"\"\n+ assert type in [\"look\", \"lookall\", \"see\", \"display\", \"displayall\"]\n+ return getattr(self.to_petl(normalize=True), type)(**options)\n+\n+ def to_snap(self):\n+ \"\"\"Create a snapshot from the resource\n+\n+ Returns\n+ list: resource's data\n+ \"\"\"\nsnap = []\nwith helpers.ensure_open(self):\nsnap.append(self.header.to_list())\n@@ -1120,14 +1150,18 @@ class Resource(Metadata):\n\"\"\"Create a resource from PETL view\"\"\"\nreturn Resource(data=view, **options)\n- def to_petl(self):\n- \"\"\"Export resource as a PETL view\"\"\"\n+ def to_petl(self, normalize=False):\n+ \"\"\"Export resource as a PETL table\"\"\"\nresource = self.to_copy()\n# Define view\nclass ResourceView(petl.Table):\ndef __iter__(self):\nwith resource:\n+ if normalize:\n+ yield resource.schema.field_names\n+ yield from (row.to_list() for row in resource.row_stream)\n+ return\nif not resource.header.missing:\nyield resource.header.labels\nyield from (row.cells for row in resource.row_stream)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/cell.py",
"new_path": "frictionless/steps/cell.py",
"diff": "@@ -19,18 +19,18 @@ class cell_convert(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nfield_name = self.get(\"fieldName\")\nfunction = self.get(\"function\")\nvalue = self.get(\"value\")\nif not field_name:\nif not function:\nfunction = lambda input: value\n- resource.data = view.convertall(function)\n+ resource.data = table.convertall(function)\nelif function:\n- resource.data = view.convert(field_name, function)\n+ resource.data = table.convert(field_name, function)\nelse:\n- resource.data = view.update(field_name, value)\n+ resource.data = table.update(field_name, value)\n# Metadata\n@@ -57,21 +57,21 @@ class cell_fill(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nvalue = self.get(\"value\")\nfield_name = self.get(\"fieldName\")\ndirection = self.get(\"direction\")\nif value:\n- resource.data = view.convert(field_name, {None: value})\n+ resource.data = table.convert(field_name, {None: value})\nelif direction == \"down\":\nif field_name:\n- resource.data = view.filldown(field_name)\n+ resource.data = table.filldown(field_name)\nelse:\n- resource.data = view.filldown()\n+ resource.data = table.filldown()\nelif direction == \"right\":\n- resource.data = view.fillright()\n+ resource.data = table.fillright()\nelif direction == \"left\":\n- resource.data = view.fillleft()\n+ resource.data = table.fillleft()\n# Metadata\n@@ -97,13 +97,13 @@ class cell_format(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nfield_name = self.get(\"fieldName\")\ntemplate = self.get(\"template\")\nif not field_name:\n- resource.data = view.formatall(template)\n+ resource.data = table.formatall(template)\nelse:\n- resource.data = view.format(field_name, template)\n+ resource.data = table.format(field_name, template)\n# Metadata\n@@ -130,11 +130,11 @@ class cell_interpolate(Step):\ndef transform_resource(self, resource):\ntemplate = self.get(\"template\")\nfield_name = self.get(\"fieldName\")\n- view = resource.to_petl()\n+ table = resource.to_petl()\nif not field_name:\n- resource.data = view.interpolateall(template)\n+ resource.data = table.interpolateall(template)\nelse:\n- resource.data = view.interpolate(field_name, template)\n+ resource.data = table.interpolate(field_name, template)\n# Metadata\n@@ -160,19 +160,19 @@ class cell_replace(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\npattern = self.get(\"pattern\")\nreplace = self.get(\"replace\")\nfield_name = self.get(\"fieldName\")\nif not field_name:\n- resource.data = view.replaceall(pattern, replace)\n+ resource.data = table.replaceall(pattern, replace)\nelse:\npattern = pattern\nfunction = petl.replace\nif pattern.startswith(\"<regex>\"):\npattern = pattern.replace(\"<regex>\", \"\")\nfunction = petl.sub\n- resource.data = function(view, field_name, pattern, replace)\n+ resource.data = function(table, field_name, pattern, replace)\n# Metadata\n@@ -196,10 +196,10 @@ class cell_set(Step):\nsuper().__init__(descriptor)\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nvalue = self.get(\"value\")\nfield_name = self.get(\"fieldName\")\n- resource.data = view.update(field_name, value)\n+ resource.data = table.update(field_name, value)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/field.py",
"new_path": "frictionless/steps/field.py",
"diff": "@@ -36,7 +36,7 @@ class field_add(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nname = self.get(\"name\")\nvalue = self.get(\"value\")\nformula = self.get(\"formula\")\n@@ -51,12 +51,12 @@ class field_add(Step):\nelse:\nresource.schema.fields.insert(index, field)\nif incremental:\n- resource.data = view.addrownumbers(field=name)\n+ resource.data = table.addrownumbers(field=name)\nelse:\nif formula:\nfunction = lambda row: simpleeval.simple_eval(formula, names=row)\nvalue = value or function\n- resource.data = view.addfield(name, value=value, index=index)\n+ resource.data = table.addfield(name, value=value, index=index)\n# Metadata\n@@ -82,12 +82,12 @@ class field_filter(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nnames = self.get(\"names\")\nfor name in resource.schema.field_names:\nif name not in names:\nresource.schema.remove_field(name)\n- resource.data = view.cut(*names)\n+ resource.data = table.cut(*names)\n# Metadata\n@@ -111,12 +111,12 @@ class field_move(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nname = self.get(\"name\")\nposition = self.get(\"position\")\nfield = resource.schema.remove_field(name)\nresource.schema.fields.insert(position - 1, field)\n- resource.data = view.movefield(name, position - 1)\n+ resource.data = table.movefield(name, position - 1)\n# Metadata\n@@ -140,11 +140,11 @@ class field_remove(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nnames = self.get(\"names\")\nfor name in names:\nresource.schema.remove_field(name)\n- resource.data = view.cutout(*names)\n+ resource.data = table.cutout(*names)\n# Metadata\n@@ -178,7 +178,7 @@ class field_split(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nname = self.get(\"name\")\nto_names = self.get(\"toNames\")\npattern = self.get(\"pattern\")\n@@ -192,7 +192,7 @@ class field_split(Step):\nif \"(\" in pattern:\nprocessor = petl.capture\nresource.data = processor(\n- view,\n+ table,\nname,\npattern,\nto_names,\n@@ -225,7 +225,7 @@ class field_unpack(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nname = self.get(\"name\")\nto_names = self.get(\"toNames\")\npreserve = self.get(\"preserve\")\n@@ -235,10 +235,10 @@ class field_unpack(Step):\nif not preserve:\nresource.schema.remove_field(name)\nif field.type == \"object\":\n- processor = view.unpackdict\n+ processor = table.unpackdict\nresource.data = processor(name, to_names, includeoriginal=preserve)\nelse:\n- processor = view.unpack\n+ processor = table.unpack\nresource.data = processor(name, to_names, include_original=preserve)\n# Metadata\n@@ -277,7 +277,7 @@ class field_update(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nname = self.get(\"name\")\nvalue = self.get(\"value\")\nformula = self.get(\"formula\")\n@@ -289,9 +289,9 @@ class field_update(Step):\nif formula:\nfunction = lambda val, row: simpleeval.simple_eval(formula, names=row)\nif function:\n- resource.data = view.convert(name, function)\n+ resource.data = table.convert(name, function)\nelif \"value\" in self:\n- resource.data = view.update(name, value)\n+ resource.data = 
table.update(name, value)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/row.py",
"new_path": "frictionless/steps/row.py",
"diff": "@@ -19,14 +19,14 @@ class row_filter(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nformula = self.get(\"formula\")\nfunction = self.get(\"function\")\nif formula:\n# NOTE: review EvalWithCompoundTypes/sync with checks\nevalclass = simpleeval.EvalWithCompoundTypes\nfunction = lambda row: evalclass(names=row).eval(formula)\n- resource.data = view.select(function)\n+ resource.data = table.select(function)\n# Metadata\n@@ -52,15 +52,15 @@ class row_search(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nregex = self.get(\"regex\")\nfield_name = self.get(\"fieldName\")\nnegate = self.get(\"negate\")\nsearch = petl.searchcomplement if negate else petl.search\nif field_name:\n- resource.data = search(view, field_name, regex)\n+ resource.data = search(table, field_name, regex)\nelse:\n- resource.data = search(view, regex)\n+ resource.data = search(table, regex)\n# Metadata\n@@ -98,18 +98,18 @@ class row_slice(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nstart = self.get(\"start\")\nstop = self.get(\"stop\")\nstep = self.get(\"step\")\nhead = self.get(\"head\")\ntail = self.get(\"tail\")\nif head:\n- resource.data = view.head(head)\n+ resource.data = table.head(head)\nelif tail:\n- resource.data = view.tail(tail)\n+ resource.data = table.tail(tail)\nelse:\n- resource.data = view.rowslice(start, stop, step)\n+ resource.data = table.rowslice(start, stop, step)\n# Metadata\n@@ -137,10 +137,10 @@ class row_sort(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nfield_names = self.get(\"fieldNames\")\nreverse = self.get(\"reverse\")\n- resource.data = view.sort(field_names, reverse=reverse)\n+ resource.data = table.sort(field_names, reverse=reverse)\n# Metadata\n@@ -165,10 +165,10 @@ class row_split(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\npattern = self.get(\"pattern\")\nfield_name = self.get(\"fieldName\")\n- resource.data = view.splitdown(field_name, pattern)\n+ resource.data = table.splitdown(field_name, pattern)\n# Metadata\n@@ -194,17 +194,17 @@ class row_subset(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nsubset = self.get(\"subset\")\nfield_name = self.get(\"fieldName\")\nif subset == \"conflicts\":\n- resource.data = view.conflicts(field_name)\n+ resource.data = table.conflicts(field_name)\nelif subset == \"distinct\":\n- resource.data = view.distinct(field_name)\n+ resource.data = table.distinct(field_name)\nelif subset == \"duplicates\":\n- resource.data = view.duplicates(field_name)\n+ resource.data = table.duplicates(field_name)\nelif subset == \"unique\":\n- resource.data = view.unique(field_name)\n+ resource.data = table.unique(field_name)\n# Metadata\n@@ -236,15 +236,15 @@ class row_ungroup(Step):\nsuper().__init__(descriptor)\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nselection = self.get(\"selection\")\ngroup_name = self.get(\"groupName\")\nvalue_name = self.get(\"valueName\")\nfunction = getattr(petl, f\"groupselect{selection}\")\nif selection in [\"first\", \"last\"]:\n- resource.data = function(view, group_name)\n+ resource.data = function(table, group_name)\nelse:\n- 
resource.data = function(view, group_name, value_name)\n+ resource.data = function(table, group_name, value_name)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/table.py",
"new_path": "frictionless/steps/table.py",
"diff": "@@ -26,7 +26,7 @@ class table_aggregate(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\ngroup_name = self.get(\"groupName\")\naggregation = self.get(\"aggregation\")\nfield = resource.schema.get_field(group_name)\n@@ -34,7 +34,7 @@ class table_aggregate(Step):\nresource.schema.add_field(field)\nfor name in aggregation.keys():\nresource.schema.add_field(Field(name=name))\n- resource.data = view.aggregate(group_name, aggregation)\n+ resource.data = table.aggregate(group_name, aggregation)\n# Metadata\n@@ -282,7 +282,7 @@ class table_melt(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nvariables = self.get(\"variables\")\nfield_name = self.get(\"fieldName\")\nto_field_names = self.get(\"toFieldNames\")\n@@ -291,7 +291,7 @@ class table_melt(Step):\nresource.schema.add_field(field)\nfor name in to_field_names:\nresource.schema.add_field(Field(name=name))\n- resource.data = view.melt(\n+ resource.data = table.melt(\nkey=field_name,\nvariables=variables,\nvariablefield=to_field_names[0],\n@@ -415,10 +415,10 @@ class table_pivot(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\noptions = self.get(\"options\")\nresource.pop(\"schema\", None)\n- resource.data = view.pivot(**options)\n+ resource.data = table.pivot(**options)\nresource.infer()\n# Metadata\n@@ -436,8 +436,8 @@ class table_print(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n- print(view.look(vrepr=str, style=\"simple\"))\n+ table = resource.to_petl()\n+ print(table.look(vrepr=str, style=\"simple\"))\n# Metadata\n@@ -466,11 +466,11 @@ class table_recast(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nfield_name = self.get(\"fieldName\")\nfrom_field_names = self.get(\"fromFieldNames\")\nresource.pop(\"schema\", None)\n- resource.data = view.recast(\n+ resource.data = table.recast(\nkey=field_name,\nvariablefield=from_field_names[0],\nvaluefield=from_field_names[1],\n@@ -495,9 +495,9 @@ class table_transpose(Step):\n# Transform\ndef transform_resource(self, resource):\n- view = resource.to_petl()\n+ table = resource.to_petl()\nresource.pop(\"schema\", None)\n- resource.data = view.transpose()\n+ resource.data = table.transpose()\nresource.infer()\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_resource.py",
"new_path": "tests/test_resource.py",
"diff": "@@ -2474,6 +2474,11 @@ def test_to_yaml_with_resource_data_is_not_a_list_issue_693():\nassert text == \"{}\\n\"\n+def test_resource_to_view():\n+ resource = Resource(\"data/table.csv\")\n+ assert resource.to_view()\n+\n+\n# Metadata\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Implemented resource.to_view() (#781) |
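Editor's note: a quick usage sketch of the new method, assuming the repository's `data/table.csv`. The view types map directly onto PETL's visualisation helpers:

```python
from frictionless import Resource

resource = Resource("data/table.csv")
print(resource.to_view())           # "look" is the default type
print(resource.to_view("lookall"))  # also: see/display/displayall
```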
234,912 | 14.04.2021 10:46:43 | -10,800 | 1e21c8cffe5e4e11fae05660364f79f8aeba18d9 | Fixed using stdin in CLI | [
{
"change_type": "MODIFY",
"old_path": "frictionless/program/describe.py",
"new_path": "frictionless/program/describe.py",
"diff": "@@ -59,7 +59,7 @@ def program_describe(\nis_stdin = False\nif not source:\nis_stdin = True\n- source = [helpers.create_byte_stream(sys.stdin.buffer.read())]\n+ source = [sys.stdin.buffer.read()]\n# Normalize parameters\nsource = list(source) if len(source) > 1 else source[0]\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/extract.py",
"new_path": "frictionless/program/extract.py",
"diff": "@@ -65,7 +65,7 @@ def program_extract(\nis_stdin = False\nif not source:\nis_stdin = True\n- source = [helpers.create_byte_stream(sys.stdin.buffer.read())]\n+ source = [sys.stdin.buffer.read()]\n# Normalize parameters\nsource = list(source) if len(source) > 1 else source[0]\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/transform.py",
"new_path": "frictionless/program/transform.py",
"diff": "@@ -2,7 +2,6 @@ import sys\nimport typer\nfrom ..transform import transform\nfrom .main import program\n-from .. import helpers\nfrom . import common\n@@ -21,7 +20,7 @@ def program_transform(\nis_stdin = False\nif not source:\nis_stdin = True\n- source = helpers.create_byte_stream(sys.stdin.buffer.read())\n+ source = [sys.stdin.buffer.read()]\n# Transform source\ntry:\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/validate.py",
"new_path": "frictionless/program/validate.py",
"diff": "@@ -73,7 +73,7 @@ def program_validate(\nis_stdin = False\nif not source:\nis_stdin = True\n- source = [helpers.create_byte_stream(sys.stdin.buffer.read())]\n+ source = [sys.stdin.buffer.read()]\n# Normalize parameters\nsource = list(source) if len(source) > 1 else source[0]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed using stdin in CLI (#805) |
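Editor's note: the Python counterpart of what the CLI now does with stdin is passing raw bytes directly as the source, which this change relies on; a sketch (the inline CSV is illustrative):

```python
from frictionless import validate

# Bytes are a valid source; the format is stated explicitly because
# it cannot be detected from a path.
report = validate(b"id,name\n1,english\n2,german\n", format="csv")
print(report.valid)
```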
234,912 | 22.04.2021 17:30:27 | -7,200 | 439c86ae74fa31934684f5c209f0d8b8cee4e3bc | Fixed program.transform swallows errors | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/issue-814.yaml",
"diff": "+tasks:\n+ - type: resource\n+ source: bad.csv\n+ steps:\n+ - code: table-print\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/transform.py",
"new_path": "frictionless/program/transform.py",
"diff": "import sys\nimport typer\n+from ..exception import FrictionlessException\nfrom ..transform import transform\nfrom .main import program\nfrom . import common\n@@ -24,7 +25,13 @@ def program_transform(\n# Transform source\ntry:\n- transform(source)\n+ status = transform(source)\n+ if not status.valid:\n+ # NOTE: improve how we handle/present errors\n+ groups = [status.errors] + list(map(lambda task: task.errors, status.tasks))\n+ for group in groups:\n+ for error in group:\n+ raise FrictionlessException(error)\nexcept Exception as exception:\ntyper.secho(str(exception), err=True, fg=typer.colors.RED, bold=True)\nraise typer.Exit(1)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/program/test_transform.py",
"new_path": "tests/program/test_transform.py",
"diff": "@@ -19,3 +19,9 @@ def test_program_transform_error_not_found():\nresult = runner.invoke(program, \"transform data/bad.yaml\")\nassert result.exit_code == 1\nassert result.stdout.count(\"No such file or directory: 'data/bad.yaml'\")\n+\n+\n+def test_program_transform_error_not_found_source_issue_814():\n+ result = runner.invoke(program, \"transform data/issue-814.yaml\")\n+ assert result.exit_code == 1\n+ assert result.stdout.count(\"No such file or directory: 'bad.csv'\")\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed program.transform swallows errors (#819) |
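Editor's note: mirroring the new CLI behavior in Python. The pipeline descriptor below matches the added `data/issue-814.yaml` fixture and points at a missing file on purpose; errors collected in the status are surfaced instead of being swallowed:

```python
from frictionless import FrictionlessException, transform

status = transform({
    "tasks": [
        {"type": "resource", "source": "bad.csv", "steps": [{"code": "table-print"}]}
    ]
})
if not status.valid:
    # Same strategy as the CLI: re-raise the first collected error
    for task in status.tasks:
        for error in task.errors:
            raise FrictionlessException(error)
```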
234,880 | 23.04.2021 16:46:18 | -7,200 | cc76b5d625f2dddfa73d15fa2594acfb58065141 | Add descriptor_innerpath
* Add descriptor_innerpath
Add the option to specify the descriptor file inside the ZIP datapackage (e.g. some/folder/datapackage.json, datapackage.yaml)
* Add descriptor_innerpath docs | [
{
"change_type": "MODIFY",
"old_path": "docs/references/api-reference.md",
"new_path": "docs/references/api-reference.md",
"diff": "@@ -3763,6 +3763,11 @@ with Resource(\"data/table.csv\") as resource:\n- `descriptor` _dict|str_ - A resource descriptor provided explicitly.\nKeyword arguments will patch this descriptor if provided.\n+- `descriptor_innerpath?` _str_ - A ZIP datapackage descriptor inner path.\n+ Path to the package descriptor inside the ZIP datapackage.\n+ Example: `some/folder/datapackage.yaml`\n+ Default: `datapackage.json`\n+\n- `name?` _str_ - A Resource name according to the specs.\nIt should be a slugified name of the resource.\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -41,6 +41,11 @@ class Package(Metadata):\ndescriptor (dict|str): A resource descriptor provided explicitly.\nKeyword arguments will patch this descriptor if provided.\n+ descriptor_innerpath? (str): A ZIP datapackage descriptor inner path.\n+ Path to the package descriptor inside the ZIP datapackage.\n+ Example: some/folder/datapackage.yaml\n+ Default: datapackage.json\n+\nresources? (dict|Resource[]): A list of resource descriptors.\nIt can be dicts or Resource instances.\n@@ -117,6 +122,7 @@ class Package(Metadata):\nsource=None,\n*,\ndescriptor=None,\n+ descriptor_innerpath=None,\n# Spec\nresources=None,\nid=None,\n@@ -162,7 +168,10 @@ class Package(Metadata):\n# Handle zip\nif helpers.is_zip_descriptor(descriptor):\n+ if descriptor_innerpath is None:\ndescriptor = helpers.unzip_descriptor(descriptor, \"datapackage.json\")\n+ else:\n+ descriptor = helpers.unzip_descriptor(descriptor, descriptor_innerpath)\n# Set attributes\nself.setinitial(\"resources\", resources)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Add descriptor_innerpath (#808)
* Add descriptor_innerpath
Add the option to specify the descriptor file inside the ZIP datapackage (e.g. some/folder/datapackage.json, datapackage.yaml)
* Add descriptor_innerpath docs
Co-authored-by: roll <[email protected]> |
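Editor's note: a usage sketch of the new option, assuming a hypothetical archive whose descriptor does not sit at the zip root:

```python
from frictionless import Package

# The descriptor lives at some/folder/datapackage.yaml inside the zip
# instead of the default datapackage.json at the root.
package = Package(
    "datapackage.zip",
    descriptor_innerpath="some/folder/datapackage.yaml",
)
print(package.resource_names)
```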
234,912 | 28.04.2021 16:11:46 | -7,200 | c186406d4a32ed9f5f1c8fc69e297cf6d920a203 | Make header guessing less aggressive | [
{
"change_type": "MODIFY",
"old_path": "frictionless/helpers.py",
"new_path": "frictionless/helpers.py",
"diff": "@@ -248,6 +248,8 @@ def is_only_strings(cells):\nreturn False\ntry:\nfloat(cell)\n+ # We assume that a year might be a header label\n+ if len(cell) != 4:\nreturn False\nexcept Exception:\npass\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/describe/test_resource.py",
"new_path": "tests/describe/test_resource.py",
"diff": "@@ -181,3 +181,8 @@ def test_describe_resource_compression_gzip_issue_606():\ndef test_describe_resource_with_json_format_issue_827():\nresource = describe(path=\"data/table.json\")\nassert resource.name == \"table\"\n+\n+\n+def test_describe_resource_with_years_in_the_header_issue_825():\n+ resource = describe(\"data/issue-825.csv\")\n+ assert resource.schema.field_names == [\"Musei\", \"2011\", \"2010\"]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Make header guessing less aggressive (#830) |
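Editor's note: a sketch of the relaxed heuristic. The test uses a CSV file; inline data is shown here for self-containedness, and the values are illustrative. Four-character numeric cells such as years no longer disqualify the first row from being treated as a header:

```python
from frictionless import describe

resource = describe([["Musei", "2011", "2010"], ["a", 1, 2], ["b", 3, 4]])
print(resource.schema.field_names)  # ['Musei', '2011', '2010']
```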
234,912 | 04.05.2021 09:51:42 | -7,200 | 08b122e5962be53280cfa9d6ae49b3b9c690dfde | Detect whether the input is TTY or not | [
{
"change_type": "MODIFY",
"old_path": "frictionless/program/describe.py",
"new_path": "frictionless/program/describe.py",
"diff": "@@ -63,9 +63,16 @@ def program_describe(\n# Support stdin\nis_stdin = False\nif not source and not path:\n+ if not sys.stdin.isatty():\nis_stdin = True\nsource = [sys.stdin.buffer.read()]\n+ # Validate input\n+ if not source and not path:\n+ message = 'Providing \"source\" or \"path\" is required'\n+ typer.secho(message, err=True, fg=typer.colors.RED, bold=True)\n+ raise typer.Exit(1)\n+\n# Normalize parameters\nsource = list(source) if len(source) > 1 else (source[0] if source else None)\ncontrol = helpers.parse_json_string(control)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/extract.py",
"new_path": "frictionless/program/extract.py",
"diff": "@@ -67,9 +67,16 @@ def program_extract(\n# Support stdin\nis_stdin = False\nif not source and not path:\n+ if not sys.stdin.isatty():\nis_stdin = True\nsource = [sys.stdin.buffer.read()]\n+ # Validate input\n+ if not source and not path:\n+ message = 'Providing \"source\" or \"path\" is required'\n+ typer.secho(message, err=True, fg=typer.colors.RED, bold=True)\n+ raise typer.Exit(1)\n+\n# Normalize parameters\nsource = list(source) if len(source) > 1 else (source[0] if source else None)\ncontrol = helpers.parse_json_string(control)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/transform.py",
"new_path": "frictionless/program/transform.py",
"diff": "@@ -20,9 +20,16 @@ def program_transform(\n# Support stdin\nis_stdin = False\nif not source:\n+ if not sys.stdin.isatty():\nis_stdin = True\nsource = [sys.stdin.buffer.read()]\n+ # Validate input\n+ if not source:\n+ message = 'Providing \"source\" is required'\n+ typer.secho(message, err=True, fg=typer.colors.RED, bold=True)\n+ raise typer.Exit(1)\n+\n# Transform source\ntry:\nstatus = transform(source)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/program/validate.py",
"new_path": "frictionless/program/validate.py",
"diff": "@@ -75,9 +75,16 @@ def program_validate(\n# Support stdin\nis_stdin = False\nif not source and not path:\n+ if not sys.stdin.isatty():\nis_stdin = True\nsource = [sys.stdin.buffer.read()]\n+ # Validate input\n+ if not source and not path:\n+ message = 'Providing \"source\" or \"path\" is required'\n+ typer.secho(message, err=True, fg=typer.colors.RED, bold=True)\n+ raise typer.Exit(1)\n+\n# Normalize parameters\nsource = list(source) if len(source) > 1 else (source[0] if source else None)\ncontrol = helpers.parse_json_string(control)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Detect whether the input is TTY or not (#839) |
234,912 | 19.05.2021 11:33:21 | -7,200 | 0948afb1566cdee6690d3cd8a848438a7486ba67 | Fixed resource.detector | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -89,14 +89,14 @@ class Package(Metadata):\ncreated? (str): The datetime on which this was created.\nThe datetime must conform to the string formats for RFC3339 datetime,\n- basepath? (str): A basepath of the resource\n- The fullpath of the resource is joined `basepath` and /path`\n-\ninnerpath? (str): A ZIP datapackage descriptor inner path.\nPath to the package descriptor inside the ZIP datapackage.\nExample: some/folder/datapackage.yaml\nDefault: datapackage.json\n+ basepath? (str): A basepath of the resource\n+ The fullpath of the resource is joined `basepath` and /path`\n+\ndetector? (Detector): File/table detector.\nFor more information, please check the Detector documentation.\n@@ -138,8 +138,8 @@ class Package(Metadata):\nimage=None,\ncreated=None,\n# Extra\n- basepath=\"\",\ninnerpath=\"datapackage.json\",\n+ basepath=\"\",\ndetector=None,\nonerror=\"ignore\",\ntrusted=False,\n@@ -470,6 +470,7 @@ class Package(Metadata):\ndescriptor,\nresources=resources,\nbasepath=self.__basepath,\n+ detector=self.__detector,\nonerror=self.__onerror,\ntrusted=self.__trusted,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -271,6 +271,8 @@ class Resource(Metadata):\ndef __setattr__(self, name, value):\nif name == \"basepath\":\nself.__basepath = value\n+ elif name == \"detector\":\n+ self.__detector = value\nelif name == \"onerror\":\nself.__onerror = value\nelif name == \"trusted\":\n@@ -1105,6 +1107,7 @@ class Resource(Metadata):\ndescriptor,\ndata=self.data,\nbasepath=self.__basepath,\n+ detector=self.__detector,\nonerror=self.__onerror,\ntrusted=self.__trusted,\npackage=self.__package,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_package.py",
"new_path": "tests/validate/test_package.py",
"diff": "@@ -2,7 +2,7 @@ import json\nimport pytest\nimport pathlib\nfrom copy import deepcopy\n-from frictionless import validate, helpers\n+from frictionless import Package, Resource, Schema, Field, Detector, validate, helpers\nIS_UNIX = not helpers.is_platform(\"windows\")\n@@ -449,3 +449,19 @@ def test_validate_package_uppercase_format_issue_494():\nreport = validate(\"data/issue-494.package.json\")\nassert report.valid\nassert report.stats[\"tasks\"] == 1\n+\n+\n+# See also: https://github.com/frictionlessdata/project/discussions/678\n+def test_validate_package_using_detector_schema_sync_issue_847():\n+ package = Package(\n+ resources=[\n+ Resource(\n+ data=[[\"f1\"], [\"v1\"], [\"v2\"], [\"v3\"]],\n+ schema=Schema(fields=[Field(name=\"f1\"), Field(name=\"f2\")]),\n+ ),\n+ ]\n+ )\n+ for resource in package.resources:\n+ resource.detector = Detector(schema_sync=True)\n+ report = validate(package)\n+ assert report.valid\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed resource.detector (#848) |
234,912 | 19.05.2021 16:25:56 | -7,200 | 5b900f2c2ecc329f4aecc5af5635b127f6d0f39d | Make yaml support unicode on export | [
{
"change_type": "MODIFY",
"old_path": "frictionless/metadata.py",
"new_path": "frictionless/metadata.py",
"diff": "@@ -137,7 +137,7 @@ class Metadata(helpers.ControlledDict):\nRaises:\nFrictionlessException: on any error\n\"\"\"\n- text = yaml.dump(self.to_dict(), Dumper=IndentDumper)\n+ text = yaml.dump(self.to_dict(), allow_unicode=True, Dumper=IndentDumper)\nif path:\ntry:\nhelpers.write_file(path, text)\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Make yaml support unicode on export (#849) |
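Editor's note: a small sketch of the effect; non-ASCII metadata now round-trips as readable YAML instead of `\uXXXX` escapes (the field name is illustrative):

```python
from frictionless import Schema, Field

schema = Schema(fields=[Field(name="município", type="string")])
print(schema.to_yaml())  # emits "município" verbatim
```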
234,912 | 19.05.2021 18:54:10 | -7,200 | ac2a2a4394713cc7a2a0889782a8ad4718ef8a7a | Fixed multiple/multipart problem for validate | [
{
"change_type": "MODIFY",
"old_path": "frictionless/validate/main.py",
"new_path": "frictionless/validate/main.py",
"diff": "@@ -23,8 +23,9 @@ def validate(source=None, type=None, **options):\n\"\"\"\nif not type:\nfile = system.create_file(source, basepath=options.get(\"basepath\", \"\"))\n- if file.type in [\"table\", \"schema\", \"resource\", \"package\", \"inquiry\"]:\n- type = \"resource\" if file.type == \"table\" else file.type\n+ type = \"package\" if file.multipart else file.type\n+ if type == \"table\":\n+ type = \"resource\"\nmodule = import_module(\"frictionless.validate\")\nvalidate = getattr(module, \"validate_%s\" % type, None)\nif validate is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_main.py",
"new_path": "tests/validate/test_main.py",
"diff": "@@ -27,3 +27,8 @@ def test_validate_from_resource_instance():\nresource = Resource(\"data/table.csv\")\nreport = validate(resource)\nassert report.valid\n+\n+\n+def test_validate_multiple_files_issue_850():\n+ report = validate(\"data/package/*.csv\")\n+ assert report.stats[\"tasks\"] == 2\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed multiple/multipart problem for validate (#851) |
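Editor's note: usage matching the new test (paths assume the repository's `data` folder); a glob is now validated as a package with one task per matched file:

```python
from frictionless import validate

report = validate("data/package/*.csv")
print(report.stats["tasks"])  # 2
print(report.valid)
```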
234,912 | 28.05.2021 12:43:39 | -7,200 | 0de6f758d6af1bb1c37ae5b6f10a02377e5d0887 | Fixed helpers.join_path | [
{
"change_type": "MODIFY",
"old_path": "frictionless/helpers.py",
"new_path": "frictionless/helpers.py",
"diff": "@@ -188,7 +188,7 @@ def is_remote_path(path):\ndef join_path(basepath, path):\n- if not is_remote_path(path):\n+ if not is_remote_path(path) and not os.path.isabs(path):\nif basepath:\nseparator = os.path.sep\nif is_remote_path(basepath):\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed helpers.join_path (#857) |
234,912 | 09.07.2021 11:02:29 | -10,800 | 0f9c0be3796a8a662a724a9de49b26f52ed1da9f | Limit click version | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -64,6 +64,9 @@ INSTALL_REQUIRES = [\n\"jsonschema>=2.5\",\n\"simpleeval>=0.9\",\n\"stringcase>=1.2\",\n+ # NOTE: Remove click after this issue is resolved:\n+ # https://github.com/tiangolo/typer/issues/280\n+ \"click>=7.1.1,<7.2.0\",\n\"typer[all]>=0.3\",\n\"validators>=0.18\",\n\"python-slugify>=1.2\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Limit click version (#878) |
234,912 | 14.07.2021 12:30:17 | -10,800 | c17c8872439df604fa67606b76b120b0e2cb8a40 | Fixed iteration performance | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/inline.py",
"new_path": "frictionless/plugins/inline.py",
"diff": "@@ -160,7 +160,6 @@ class InlineParser(Parser):\nyield headers\nyield [item.get(header) for header in headers]\nfor item in data:\n- # NOTE: we need to profile and optimize this check if needed\nif not isinstance(item, dict):\nerror = errors.SourceError(note=\"unsupported inline data\")\nraise FrictionlessException(error)\n@@ -170,7 +169,6 @@ class InlineParser(Parser):\nelif isinstance(item, (list, tuple)):\nyield item\nfor item in data:\n- # NOTE: we need to profile and optimize this check if needed\nif not isinstance(item, (list, tuple)):\nerror = errors.SourceError(note=\"unsupported inline data\")\nraise FrictionlessException(error)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/resource.py",
"new_path": "frictionless/resource.py",
"diff": "@@ -20,8 +20,7 @@ from . import config\n# NOTE:\n-# Consider making resource.stats unavailable until it's fully calculated\n-# Also, review the situation with describe function removing stats (move to infer?)\n+# Review the situation with describe function removing stats (move to infer?)\nclass Resource(Metadata):\n@@ -909,7 +908,6 @@ class Resource(Metadata):\n# Create row\nself.__row_number += 1\n- self.stats[\"rows\"] = self.__row_number\nrow = Row(\ncells,\nfield_info=field_info,\n@@ -971,6 +969,9 @@ class Resource(Metadata):\n# Yield row\nyield row\n+ # Update stats\n+ self.stats[\"rows\"] = self.__row_number\n+\n# Return row stream\nreturn row_stream()\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed iteration performance (#880) |
234,912 | 30.07.2021 17:40:26 | -10,800 | 43c4dfecfe74535b87f74d6acd84a98b9984ebcc | Fixed resource.dialect profile | [
{
"change_type": "MODIFY",
"old_path": "frictionless/assets/profiles/package/fiscal.json",
"new_path": "frictionless/assets/profiles/package/fiscal.json",
"diff": "\"propertyOrder\": 50,\n\"title\": \"CSV Dialect\",\n\"description\": \"The CSV dialect descriptor.\",\n- \"type\": \"object\",\n- \"required\": [\n- \"delimiter\",\n- \"doubleQuote\"\n- ],\n+ \"type\": [\"string\", \"object\"],\n\"properties\": {\n\"delimiter\": {\n\"title\": \"Delimiter\",\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/assets/profiles/package/tabular.json",
"new_path": "frictionless/assets/profiles/package/tabular.json",
"diff": "\"propertyOrder\": 50,\n\"title\": \"CSV Dialect\",\n\"description\": \"The CSV dialect descriptor.\",\n- \"type\": \"object\",\n- \"required\": [\n- \"delimiter\",\n- \"doubleQuote\"\n- ],\n+ \"type\": [\"string\", \"object\"],\n\"properties\": {\n\"csvddfVersion\": {\n\"title\": \"CSV Dialect schema version\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed resource.dialect profile (#889) |
234,917 | 18.08.2021 00:55:12 | 18,000 | 02532f6b1c2796faf5ff57e6284c642f9ece6ea6 | Edit text for grammar
I found some more typos/grammar errors to fix | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/basic-examples.md",
"new_path": "docs/guides/basic-examples.md",
"diff": "@@ -56,14 +56,14 @@ id,neighbor_id,name,population\n</Tabs>\n-As we can see, it's a data containing information about European countries and their populations. Also, it's easy to notice that there are two fields having a relationship based on a country's identifier: neighbor_id is a Foreign Key to id.\n+As we can see, this is data containing information about European countries and their populations. Also, it looks like there are two fields having a relationship based on a country's identifier: neighbor_id is a Foreign Key to id.\n## Describing Data\n-First of all, we're going to describe our dataset. Frictionless uses powerful [Frictionless Data Specifications](https://specs.frictionlessdata.io/). They are very handy to describe:\n-- a data table - [Table Schema](https://specs.frictionlessdata.io/table-schema/)\n-- a data resource - [Data Resource](https://specs.frictionlessdata.io/data-resource/)\n-- a data package - [Data Package](https://specs.frictionlessdata.io/data-package/)\n+First of all, we're going to describe our dataset. Frictionless uses the powerful [Frictionless Data Specifications](https://specs.frictionlessdata.io/). They are very handy to describe:\n+- a data table - using [Table Schema](https://specs.frictionlessdata.io/table-schema/)\n+- a data resource - using [Data Resource](https://specs.frictionlessdata.io/data-resource/)\n+- a data package - using [Data Package](https://specs.frictionlessdata.io/data-package/)\n- and other objects\nLet's describe the `countries` table:\n@@ -74,7 +74,7 @@ values={[{ label: 'CLI', value: 'cli'}, { label: 'Python', value: 'python'}]}>\n<TabItem value=\"cli\">\n```bash script\n-frictionless describe countries.csv # add --stats to get statistics\n+frictionless describe countries.csv # optionally add --stats to get statistics\n```\n```yaml\n# --------\n@@ -317,7 +317,7 @@ pprint(rows)\nActually, it doesn't look terrible, but in reality, data like this is not quite useful:\n- it's not possible to export this data e.g., to SQL because integers are mixed with strings\n- there is still a basically empty row we don't want to have\n-- there is a clear mistake in Germany's neighborhood!\n+- there are some mistakes in the neighbor_id column\nLet's use the metadata we save to try extracting data with the help of Frictionless Data specifications:\n@@ -593,7 +593,7 @@ id,neighbor_id,name,population\n</Tabs>\n-Basically, that's it; now, we have a valid data file and a corresponding metadata file. It can be shared with other people or stored without fear of type errors or other problems making data research not reproducible.\n+Basically, that's it; now, we have a valid data file and a corresponding metadata file. It can be shared with other people or stored without fear of type errors or other problems making research data not reproducible.\n<Tabs\ndefaultValue=\"cli\"\n@@ -627,4 +627,4 @@ countries.resource.yaml\n</Tabs>\n-In the next articles, we will explore more advanced Frictionless' functionality.\n+In the next articles, we will explore more advanced Frictionless functionality.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Edit text for grammar (#896)
I found some more typos/grammar errors to fix |
234,912 | 27.08.2021 18:42:49 | -10,800 | 99f2b38c3e303494011cc6711238111092d8566e | Fixed ckan reading with mixed resources | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/ckan.py",
"new_path": "frictionless/plugins/ckan.py",
"diff": "@@ -184,7 +184,12 @@ class CkanStorage(Storage):\ndef read_package(self, **options):\npackage = Package()\nfor name in self:\n+ try:\nresource = self.read_resource(name)\n+ # We skip not tabular resources\n+ except FrictionlessException as exception:\n+ if not exception.error.note.count(\"Not Found Error\"):\n+ raise\npackage.resources.append(resource)\nreturn package\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed ckan reading with mixed resources (#900) |
234,912 | 26.09.2021 10:13:57 | -10,800 | 30cdc0a049e1c9b44a3f6185bc5dea623037af86 | Fix simpleeval dependency | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -62,7 +62,10 @@ INSTALL_REQUIRES = [\n\"chardet>=3.0\",\n\"requests>=2.10\",\n\"jsonschema>=2.5\",\n- \"simpleeval>=0.9\",\n+ # TODO: recover when the issue is resolved:\n+ # https://github.com/danthedeckie/simpleeval/issues/90\n+ # \"simpleeval>=0.9\",\n+ \"simpleeval@https://github.com/roll/simpleeval/archive/e2b37a96169a219613e1ffff3f38769d682e8596.zip\",\n\"stringcase>=1.2\",\n\"typer[all]>=0.4\",\n\"validators>=0.18\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fix simpleeval dependency (#916) |
234,921 | 03.10.2021 03:34:27 | 10,800 | c3a6a1831447cb4804b49a6dc707bee2cc75959a | Update describing-data.md
Fixing/removing typos; fixing relative link | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -61,7 +61,7 @@ frictionless describe your-table.csv --type resource\nfrictionless describe your-table.csv --type package\n```\n-Please take into account that file names might be used by Frictionless to detect a metadata type for furhter data extraction or validation. It's recommended to use corresponding suffixes when you save your metadata to the disc, for example, you might name your Table Schema as `table.schema.yaml`, Data Resource - `table.resource.yaml`, and Data Package - `table.package.yaml`. If there is no hint in the file name Frictionless will assume that it's a resource descriptor by default.\n+Please take into account that file names might be used by Frictionless to detect a metadata type for data extraction or validation. It's recommended to use corresponding suffixes when you save your metadata to the disk. For example, you might name your Table Schema as `table.schema.yaml`, Data Resource as `table.resource.yaml`, and Data Package as `table.package.yaml`. If there is no hint in the file name Frictionless will assume that it's a resource descriptor by default.\nFor example, if we want a Data Package descriptor for a single file:\n@@ -96,7 +96,7 @@ resources:\nTable Schema is a specification for providing a \"schema\" (similar to a database schema) for tabular data. This information includes the expected data type for each value in a column (\"string\", \"number\", \"date\", etc.), constraints on the value (\"this string can only be at most 10 characters long\"), and the expected format of the data (\"this field should only contain strings that look like email addresses\"). Table Schema can also specify relations between data tables.\n-We're going to use this file for the examples in this section. For this guide, we only use CSV files because of their demonstrativeness, but in-general Frictionless can handle data in Excel, JSON, SQL, and many other formats:\n+We're going to use this file for the examples in this section. For this guide, we only use CSV files because of their demonstrativeness, but in general Frictionless can handle data in Excel, JSON, SQL, and many other formats:\n> Download [`country-1.csv`](https://raw.githubusercontent.com/frictionlessdata/frictionless-py/master/data/country-1.csv) to reproduce the examples (right-click and \"Save link as\").\n@@ -258,7 +258,7 @@ resource.to_yaml(\"country.resource.yaml\")\nSo what we did here:\n- we set the header rows to be row number 2; as humans, we can easily see that was the proper row\n- we set the CSV Delimiter to be \";\"\n-- we reuse the schema we created [earlier](#describing-schema) as the data has the same structure and meaning\n+- we reuse the schema we created [earlier](#describing-a-schema) as the data has the same structure and meaning\n```bash script title=\"CLI\"\ncat country.resource.yaml\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update describing-data.md (#917)
Fixing/removing typos; fixing relative link |
234,921 | 03.10.2021 03:35:17 | 10,800 | a6e77403477ab901b9a5af3e56c0c5f5cbe82bde | Update quick-start.md
Remove "data" folder from python example for the invalid.csv file, as in the CLI example | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/quick-start.md",
"new_path": "docs/guides/quick-start.md",
"diff": "@@ -156,7 +156,7 @@ scheme: file\nfrom pprint import pprint\nfrom frictionless import describe\n-resource = describe('data/invalid.csv')\n+resource = describe('invalid.csv')\npprint(resource)\n```\n```\n@@ -164,7 +164,7 @@ pprint(resource)\n'format': 'csv',\n'hashing': 'md5',\n'name': 'invalid',\n- 'path': 'data/invalid.csv',\n+ 'path': 'invalid.csv',\n'profile': 'tabular-data-resource',\n'schema': {'fields': [{'name': 'id', 'type': 'integer'},\n{'name': 'name', 'type': 'string'},\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update quick-start.md (#918)
Remove "data" folder from python example for the invalid.csv file, as in the CLI example |
234,921 | 03.10.2021 03:35:54 | 10,800 | 64aedd7eeb70eca1c38a055628b07d99244349eb | Update basic-examples.md
Remove "data" folder from source file countries.csv when creating the pipeline yaml file in CLI | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/basic-examples.md",
"new_path": "docs/guides/basic-examples.md",
"diff": "@@ -491,7 +491,7 @@ values={[{ label: 'CLI', value: 'cli'}, { label: 'Python', value: 'python'}]}>\n$ cat > countries.pipeline.yaml <<EOF\ntasks:\n- type: resource\n- source: data/countries.csv\n+ source: countries.csv\nsteps:\n- code: cell-replace\nfieldName: neighbor_id\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update basic-examples.md (#919)
Remove "data" folder from source file countries.csv when creating the pipeline yaml file in CLI |
234,921 | 03.10.2021 03:36:41 | 10,800 | c7b79bf6093eda5c8305824569c573a234b828c4 | Update basic-examples.md
Change python code to be os-independent; the previous code won't run on Windows (ls call) | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/basic-examples.md",
"new_path": "docs/guides/basic-examples.md",
"diff": "@@ -613,14 +613,13 @@ countries.resource.yaml\n```python script\n-import subprocess\n+import os\n-output = subprocess.check_output('ls countries.*', shell=True)\n-print(output.decode('utf-8'))\n+files = [f for f in os.listdir('.') if os.path.isfile(f) and f.startswith('countries.')]\n+print(files)\n```\n```\n-countries.csv\n-countries.resource.yaml\n+['countries.csv', 'countries.resource.yaml']\n```\n</TabItem>\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update basic-examples.md (#920)
Change python code to be os-independent; the previous code won't run on Windows (ls call) |
234,912 | 15.10.2021 10:28:52 | -10,800 | 34ff917dc31e5a5fc578c536b18a994261882018 | Rebased on reportbro-simpleeval | [
{
"change_type": "MODIFY",
"old_path": "setup.py",
"new_path": "setup.py",
"diff": "@@ -63,7 +63,10 @@ INSTALL_REQUIRES = [\n\"chardet>=3.0\",\n\"requests>=2.10\",\n\"jsonschema>=2.5\",\n- \"simpleeval>=0.9\",\n+ # NOTE:\n+ # We can get back to original \"simpleeval\" when the issue is resolved:\n+ # https://github.com/danthedeckie/simpleeval/issues/90\n+ \"reportbro-simpleeval>=0.9\",\n\"stringcase>=1.2\",\n\"typer[all]>=0.4\",\n\"validators>=0.18\",\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Rebased on reportbro-simpleeval (#926) |
234,912 | 15.10.2021 11:04:27 | -10,800 | b88ff6a6fc5094ababe654299ae14af3f348fbd0 | Fixed tabular-data-package | [
{
"change_type": "MODIFY",
"old_path": "frictionless/assets/profiles/package/tabular.json",
"new_path": "frictionless/assets/profiles/package/tabular.json",
"diff": "\"propertyOrder\": 40,\n\"title\": \"Table Schema\",\n\"description\": \"A Table Schema for this resource, compliant with the [Table Schema](/tableschema/) specification.\",\n- \"type\": \"object\",\n+ \"type\": [\"string\", \"object\"],\n\"required\": [\n\"fields\"\n],\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed tabular-data-package (#927) |
234,917 | 25.10.2021 02:08:04 | 18,000 | 31c50b72210856c61ddf47e6b28bab5a46fd0ac4 | adds tutorial and how-to from hackathon | [
{
"change_type": "MODIFY",
"old_path": "docs/development/contributing.md",
"new_path": "docs/development/contributing.md",
"diff": "@@ -6,7 +6,7 @@ We welcome contributions from anyone! Please read the following guidelines, and\n## General Guidelines\n-We use Github as a code and issues hosting platform. To report a bug or propose a new feature, please open an issue. For pull requests, we would ask you initially create an issue and then create a pull requests linked to this issue.\n+We use Github as a code and issues hosting platform. To report a bug or propose a new feature, please open an issue. For pull requests, we would ask you initially create an issue and then create a pull requests linked to this issue. If you'd like to write a tutorial, we recommend you first read this [how-to article](https://docs.google.com/document/d/1zbWMmIeU8DUwzGaEih0JGJ-DMGug5-2UksRN1x4fvj8/edit?usp=sharing) by Frictionless contributor Meyrele.\n## Docs Contribution\n@@ -31,7 +31,7 @@ livemark sync docs/guides/basic-examples.md # update inline\nIt's possible to run this documentation portal locally. This requires Node.js 12+ installed on your computer, and can be run with the following code:\n```bash\n-cd portal\n+cd site\nnpm install\nnpm start\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "site/docusaurus.config.js",
"new_path": "site/docusaurus.config.js",
"diff": "@@ -99,6 +99,11 @@ module.exports = {\nhref:\n\"https://colab.research.google.com/drive/12RmGajHamGP5wOoAhy8N7Gchn9TmVnG-\",\n},\n+ {\n+ label: \"Frictionless Excel\",\n+ href:\n+ \"https://colab.research.google.com/drive/1QHO1r0f670YOYVqODltTP7bnA9qlur-t?usp=sharing\",\n+ },\n],\n},\n{\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | adds tutorial and how-to from hackathon (#934) |
234,890 | 08.11.2021 04:22:55 | 18,000 | 5c1817f1d8d1931a6017024b24300a769f91f9a1 | Markdown formatting error in the docs
The current format makes it look like the bullet point and the paragraph below is one and the same. These however contain two different ideas. I have added a new line to fix this formatting issue. | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -324,6 +324,7 @@ To continue learning about data resources please read:\nA Data Package consists of:\n- Metadata that describes the structure and contents of the package\n- Resources such as data files that form the contents of the package\n+\nThe Data Package metadata is stored in a \"descriptor\". This descriptor is what makes a collection of data a Data Package. The structure of this descriptor is the main content of the specification below.\nIn addition to this descriptor, a data package will include other resources such as data files. The Data Package specification does NOT impose any requirements on their form or structure and can, therefore, be used for packaging any kind of data.\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Markdown formatting error in the docs (#943)
The current format makes it look like the bullet point and the paragraph below is one and the same. These however contain two different ideas. I have added a new line to fix this formatting issue. |
234,912 | 08.11.2021 14:59:53 | -7,200 | 6385dd1e76285d4b27baffc4d85a14913fd21341 | Emit an error for resource names duplication | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package.py",
"new_path": "frictionless/package.py",
"diff": "@@ -712,6 +712,9 @@ class Package(Metadata):\n# Resources\nfor resource in self.resources:\nyield from resource.metadata_errors\n+ if len(self.resource_names) != len(set(self.resource_names)):\n+ note = \"names of the resources are not unique\"\n+ yield errors.PackageError(note=note)\n# Created\nif self.get(\"created\"):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_package.py",
"new_path": "tests/test_package.py",
"diff": "@@ -1030,3 +1030,15 @@ def test_package_validation_is_not_strict_enough_issue_869():\nassert len(errors) == 2\nassert errors[0].note == 'property \"created\" is not valid \"datetime\"'\nassert errors[1].note == 'property \"contributors[].email\" is not valid \"email\"'\n+\n+\n+def test_package_validation_duplicate_resource_names_issue_942():\n+ package = Package(\n+ resources=[\n+ Resource(name=\"name\", path=\"data/table.csv\"),\n+ Resource(name=\"name\", path=\"data/table.csv\"),\n+ ]\n+ )\n+ errors = package.metadata_errors\n+ assert len(errors) == 1\n+ assert errors[0].note == \"names of the resources are not unique\"\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Emit an error for resource names duplication (#946) |
234,889 | 07.12.2021 14:10:53 | -3,600 | 0753d61fd9a048a87b3f87a472493a8bded018b9 | Use row.cells for `cell/row-error`
* Use row values for `row-error` cells
fixes
* check `cells` attr in `foreign-key-error` tests
* fixed None cell values in errors. Updated tests
* keep using row.values() instead of row.cells
* use row.cells instead of row.values() in errors | [
{
"change_type": "MODIFY",
"old_path": "frictionless/errors/cell.py",
"new_path": "frictionless/errors/cell.py",
"diff": "@@ -73,9 +73,10 @@ class CellError(RowError):\nif field_name == name:\ncell = row[field_name]\nfield_position = row.field_positions[field_number - 1]\n+ to_str = lambda v: str(v) if v is not None else \"\"\nreturn cls(\nnote=note,\n- cells=list(map(str, row.values())),\n+ cells=list(map(to_str, row.cells)),\nrow_number=row.row_number,\nrow_position=row.row_position,\ncell=str(cell),\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/errors/row.py",
"new_path": "frictionless/errors/row.py",
"diff": "@@ -40,9 +40,10 @@ class RowError(TableError):\nReturns:\nRowError: error\n\"\"\"\n+ to_str = lambda v: str(v) if v is not None else \"\"\nreturn cls(\nnote=note,\n- cells=list(map(str, row)),\n+ cells=list(map(to_str, row.cells)),\nrow_number=row.row_number,\nrow_position=row.row_position,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/row.py",
"new_path": "frictionless/row.py",
"diff": "@@ -276,6 +276,7 @@ class Row(dict):\n# Prepare context\ncells = self.__cells\n+ to_str = lambda v: str(v) if v is not None else \"\"\nfields = self.__field_info[\"objects\"]\nfield_mapping = self.__field_info[\"mapping\"]\nfield_positions = self.__field_info[\"positions\"]\n@@ -311,7 +312,7 @@ class Row(dict):\nself.__errors.append(\nerrors.TypeError(\nnote=type_note,\n- cells=list(map(str, cells)),\n+ cells=list(map(to_str, cells)),\nrow_number=self.__row_number,\nrow_position=self.__row_position,\ncell=str(source),\n@@ -327,7 +328,7 @@ class Row(dict):\nself.__errors.append(\nerrors.ConstraintError(\nnote=note,\n- cells=list(map(str, cells)),\n+ cells=list(map(to_str, cells)),\nrow_number=self.__row_number,\nrow_position=self.__row_position,\ncell=str(source),\n@@ -350,7 +351,7 @@ class Row(dict):\nself.__errors.append(\nerrors.ExtraCellError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(to_str, cells)),\nrow_number=self.__row_number,\nrow_position=self.__row_position,\ncell=str(cell),\n@@ -369,7 +370,7 @@ class Row(dict):\nself.__errors.append(\nerrors.MissingCellError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(to_str, cells)),\nrow_number=self.__row_number,\nrow_position=self.__row_position,\ncell=\"\",\n@@ -385,7 +386,7 @@ class Row(dict):\nself.__errors = [\nerrors.BlankRowError(\nnote=\"\",\n- cells=list(map(str, cells)),\n+ cells=list(map(to_str, cells)),\nrow_number=self.__row_number,\nrow_position=self.__row_position,\n)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_package.py",
"new_path": "tests/validate/test_package.py",
"diff": "@@ -206,8 +206,8 @@ def test_validate_package_schema_foreign_key_self_referenced_resource_violation(\ndescriptor = deepcopy(DESCRIPTOR_FK)\ndel descriptor[\"resources\"][0][\"data\"][4]\nreport = validate(descriptor)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [4, None, \"foreign-key-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [4, None, \"foreign-key-error\", [\"3\", \"rome\", \"4\"]],\n]\n@@ -215,8 +215,8 @@ def test_validate_package_schema_foreign_key_internal_resource_violation():\ndescriptor = deepcopy(DESCRIPTOR_FK)\ndel descriptor[\"resources\"][1][\"data\"][4]\nreport = validate(descriptor)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [5, None, \"foreign-key-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [5, None, \"foreign-key-error\", [\"4\", \"rio\", \"\"]],\n]\n@@ -224,11 +224,11 @@ def test_validate_package_schema_foreign_key_internal_resource_violation_non_exi\ndescriptor = deepcopy(DESCRIPTOR_FK)\ndescriptor[\"resources\"][1][\"data\"] = [[\"label\", \"population\"], [10, 10]]\nreport = validate(descriptor)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [2, None, \"foreign-key-error\"],\n- [3, None, \"foreign-key-error\"],\n- [4, None, \"foreign-key-error\"],\n- [5, None, \"foreign-key-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [2, None, \"foreign-key-error\", [\"1\", \"london\", \"2\"]],\n+ [3, None, \"foreign-key-error\", [\"2\", \"paris\", \"3\"]],\n+ [4, None, \"foreign-key-error\", [\"3\", \"rome\", \"4\"]],\n+ [5, None, \"foreign-key-error\", [\"4\", \"rio\", \"\"]],\n]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/validate/test_resource.py",
"new_path": "tests/validate/test_resource.py",
"diff": "@@ -587,8 +587,8 @@ def test_validate_schema_foreign_key_error_self_referencing_invalid():\n},\n}\nreport = validate(source)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [6, None, \"foreign-key-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [6, None, \"foreign-key-error\", [\"5\", \"6\", \"Rome\"]],\n]\n@@ -609,6 +609,8 @@ def test_validate_schema_unique_error_and_type_error():\n[\"a1\", 100],\n[\"a2\", \"bad\"],\n[\"a3\", 100],\n+ [\"a4\", 0],\n+ [\"a5\", 0],\n]\nschema = {\n\"fields\": [\n@@ -617,9 +619,10 @@ def test_validate_schema_unique_error_and_type_error():\n]\n}\nreport = validate(source, schema=schema)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [3, 2, \"type-error\"],\n- [4, 2, \"unique-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [3, 2, \"type-error\", [\"a2\", \"bad\"]],\n+ [4, 2, \"unique-error\", [\"a3\", \"100\"]],\n+ [6, 2, \"unique-error\", [\"a5\", \"0\"]],\n]\n@@ -835,8 +838,8 @@ def test_validate_detector_headers_errors():\n}\ndetector = Detector(schema_sync=True)\nreport = validate(source, schema=schema, detector=detector)\n- assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\"]) == [\n- [4, 4, \"constraint-error\"],\n+ assert report.flatten([\"rowPosition\", \"fieldPosition\", \"code\", \"cells\"]) == [\n+ [4, 4, \"constraint-error\", [\"3\", \"Smith\", \"Paul\", \"\"]],\n]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Use row.cells for `cell/row-error` (#952)
* Use row values for `row-error` cells
fixes #951
* check `cells` attr in `foreign-key-error` tests
* fixed None cell values in errors. Updated tests
* keep using row.values() instead of row.cells
* use row.cells instead of row.values() in errors |
234,893 | 23.12.2021 09:11:44 | -3,600 | 3309b0525fda0fc3360fabcd5c0bde8785fb6d58 | Add documentation for table dimensions validator | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-checks.md",
"new_path": "docs/guides/validation-checks.md",
"diff": "@@ -176,3 +176,81 @@ pprint(report.flatten([\"code\", \"message\"]))\n'The row at position 4 has an error: the row constraint to conform is '\n'\"salary == bonus * 5\"']]\n```\n+\n+### Table Dimensions\n+\n+This check is used to validate if your data has expected dimensions as: exact number of rows (`num_rows`), minimum (`min_rows`) and maximum (`max_rows`) number of rows, exact number of fields (`num_fields`), minimum (`min_fields`) and maximum (`max_fields`) number of fields.\n+\n+```python script title=\"Python\"\n+from pprint import pprint\n+from frictionless import validate, checks\n+\n+source = [\n+ [\"row\", \"salary\", \"bonus\"],\n+ [2, 1000, 200],\n+ [3, 2500, 500],\n+ [4, 1300, 500],\n+ [5, 5000, 1000],\n+]\n+report = validate(source, checks=[checks.table_dimensions(num_rows=5)])\n+pprint(report.flatten([\"code\", \"message\"]))\n+```\n+```\n+[['table-dimensions-error',\n+ 'The data source does not have the required dimensions: Current number of '\n+ 'rows is 4, the required is 5']]\n+```\n+\n+You can also give multiples limits at the same time:\n+\n+```python script title=\"Python\"\n+from pprint import pprint\n+from frictionless import validate, checks\n+\n+source = [\n+ [\"row\", \"salary\", \"bonus\"],\n+ [2, 1000, 200],\n+ [3, 2500, 500],\n+ [4, 1300, 500],\n+ [5, 5000, 1000],\n+]\n+report = validate(source, checks=[checks.table_dimensions(num_rows=5, num_fields=4)])\n+pprint(report.flatten([\"code\", \"message\"]))\n+```\n+```\n+[['table-dimensions-error',\n+ 'The data source does not have the required dimensions: Current number of '\n+ 'fields is 3, the required number is 4'],\n+\n+[['table-dimensions-error',\n+ 'The data source does not have the required dimensions: Current number of '\n+ 'rows is 4, the required is 5']]\n+```\n+\n+It is possible to use de check declaratively as:\n+\n+```python script title=\"Python\"\n+from pprint import pprint\n+from frictionless import validate, checks\n+\n+source = [\n+ [\"row\", \"salary\", \"bonus\"],\n+ [2, 1000, 200],\n+ [3, 2500, 500],\n+ [4, 1300, 500],\n+ [5, 5000, 1000],\n+]\n+\n+report = validate(source, checks=[{\"code\": \"table-dimensions\", \"minFields\": 4, \"maxRows\": 3}])\n+pprint(report.flatten([\"code\", \"message\"]))\n+```\n+```\n+[['table-dimensions-error',\n+ 'The data source does not have the required dimensions: Current number of '\n+ 'fields is 3, the minimum is 4'],\n+ ['table-dimensions-error',\n+ 'The data source does not have the required dimensions: Current number of '\n+ 'rows is 4, the maximum is 3']]\n+```\n+\n+But the table dimensions check arguments `num_rows`, `min_rows`, `max_rows`, `num_fields`, `min_fields`, `max_fields` must be passed in camelCase format as the example above i.e. `numRows`, `minRows`, `maxRows`, `numFields`, `minFields` and `maxFields`.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/references/api-reference.md",
"new_path": "docs/references/api-reference.md",
"diff": "@@ -604,7 +604,6 @@ Detect schema from fragment\n- `Schema` - schema\n-\n## Dialect\n```python\n@@ -7553,6 +7552,7 @@ Public | `from frictionless import validate_table`\n- `Report` - validation report\n+\n## validate\\_schema\n```python\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Add documentation for table dimensions validator (#961) |
234,912 | 13.01.2022 16:14:25 | -10,800 | 10730f5d180d001875942d63c1f7b0339bf0e1fe | Fixed steps.table_update with new_name | [
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/field.py",
"new_path": "frictionless/steps/field.py",
"diff": "@@ -313,6 +313,8 @@ class field_update(Step):\nfunction = lambda val, row: simpleeval.simple_eval(formula, names=row)\nif function:\nresource.data = table.convert(name, function)\n+ elif new_name:\n+ resource.data = table.rename({name: new_name})\nelif \"value\" in self:\nresource.data = table.update(name, value)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/steps/test_field.py",
"new_path": "tests/steps/test_field.py",
"diff": "@@ -410,3 +410,38 @@ def test_step_field_update_new_name():\n{\"name\": \"population\", \"type\": \"integer\"},\n]\n}\n+ assert target.read_rows() == [\n+ {\"new-name\": 1, \"name\": \"germany\", \"population\": 83},\n+ {\"new-name\": 2, \"name\": \"france\", \"population\": 66},\n+ {\"new-name\": 3, \"name\": \"spain\", \"population\": 47},\n+ ]\n+\n+\n+# Issues\n+\n+\n+def test_transform_rename_move_field_issue_953():\n+ target = transform(\n+ data=[\n+ {\"id\": 1, \"name\": \"germany\", \"population\": 83},\n+ {\"id\": 2, \"name\": \"france\", \"population\": 66},\n+ {\"id\": 3, \"name\": \"spain\", \"population\": 47},\n+ ],\n+ steps=[\n+ steps.table_normalize(),\n+ steps.field_update(name=\"name\", new_name=\"country\"),\n+ steps.field_move(name=\"country\", position=3),\n+ ],\n+ )\n+ assert target.schema == {\n+ \"fields\": [\n+ {\"name\": \"id\", \"type\": \"integer\"},\n+ {\"name\": \"population\", \"type\": \"integer\"},\n+ {\"name\": \"country\", \"type\": \"string\"},\n+ ]\n+ }\n+ assert target.read_rows() == [\n+ {\"id\": 1, \"population\": 83, \"country\": \"germany\"},\n+ {\"id\": 2, \"population\": 66, \"country\": \"france\"},\n+ {\"id\": 3, \"population\": 47, \"country\": \"spain\"},\n+ ]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed steps.table_update with new_name (#970) |
234,912 | 18.01.2022 17:35:59 | -10,800 | 4383888086765232428bae200b323105ec38dfe7 | Fixed sql plugin writing resources order
* Fixed sql plugin writing resources order
by
* Removed unused data | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/sql.py",
"new_path": "frictionless/plugins/sql.py",
"diff": "@@ -378,8 +378,10 @@ class SqlStorage(Storage):\nself.__metadata.create_all(tables=sql_tables)\n# Write data\n- for resource in package.resources:\n- self.__write_convert_data(resource)\n+ existent_names = list(self)\n+ for name in existent_names:\n+ if package.has_resource(name):\n+ self.__write_convert_data(package.get_resource(name))\ndef __write_convert_name(self, name):\nreturn self.__prefix + name\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/plugins/test_sql.py",
"new_path": "tests/plugins/test_sql.py",
"diff": "@@ -566,6 +566,16 @@ def test_sql_storage_postgresql_integrity(postgresql_url):\nstorage.delete_package(target.resource_names)\n+def test_sql_storage_postgresql_integrity_different_order_issue_957(postgresql_url):\n+ dialect = SqlDialect(prefix=\"prefix_\")\n+ source = Package(\"data/storage/integrity.json\")\n+ source.add_resource(source.remove_resource(\"integrity_main\"))\n+ storage = source.to_sql(postgresql_url, dialect=dialect)\n+ target = Package.from_sql(postgresql_url, dialect=dialect)\n+ assert len(target.resources) == 2\n+ storage.delete_package(target.resource_names)\n+\n+\ndef test_sql_storage_postgresql_constraints(postgresql_url):\ndialect = SqlDialect(prefix=\"prefix_\")\nsource = Package(\"data/storage/constraints.json\")\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed sql plugin writing resources order (#978)
* Fixed sql plugin writing resources order
by @davidpeckham
* Removed unused data |
234,894 | 03.02.2022 03:16:31 | 18,000 | 6c120a1716abc8c5a825a88c62a91ba0935be4b1 | Allow Mapping instead of dict instance | [
{
"change_type": "MODIFY",
"old_path": "frictionless/file.py",
"new_path": "frictionless/file.py",
"diff": "import os\nimport glob\n+from collections.abc import Mapping\nfrom pathlib import Path\nfrom .helpers import cached_property\nfrom . import settings\n@@ -148,7 +149,7 @@ class File:\n# Detect type\ntype = \"table\"\nif not multipart:\n- if memory and isinstance(data, dict):\n+ if memory and isinstance(data, Mapping):\ntype = \"resource\"\nif data.get(\"fields\") is not None:\ntype = \"schema\"\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/metadata.py",
"new_path": "frictionless/metadata.py",
"diff": "@@ -3,6 +3,7 @@ import json\nimport yaml\nimport jsonschema\nimport stringcase\n+from collections.abc import Mapping\nfrom pathlib import Path\nfrom operator import setitem\nfrom functools import partial\n@@ -189,7 +190,7 @@ class Metadata(helpers.ControlledDict):\ntry:\nif descriptor is None:\nreturn {}\n- if isinstance(descriptor, dict):\n+ if isinstance(descriptor, Mapping):\nif not self.metadata_duplicate:\nreturn descriptor\ntry:\n@@ -279,7 +280,7 @@ class Metadata(helpers.ControlledDict):\ndef metadata_to_dict(value):\nprocess = lambda value: value.to_dict() if hasattr(value, \"to_dict\") else value\n- if isinstance(value, dict):\n+ if isinstance(value, Mapping):\nvalue = {key: metadata_to_dict(process(value)) for key, value in value.items()}\nelif isinstance(value, list):\nvalue = [metadata_to_dict(process(value)) for value in value]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_package.py",
"new_path": "tests/test_package.py",
"diff": "@@ -3,6 +3,7 @@ import json\nimport yaml\nimport pytest\nimport zipfile\n+from collections.abc import Mapping\nfrom pathlib import Path\nfrom frictionless import Package, Resource, Layout, describe_package, helpers\nfrom frictionless import FrictionlessException\n@@ -34,6 +35,28 @@ def test_package_from_dict():\n}\n+class NotADict(Mapping):\n+ def __init__(self, **kwargs):\n+ self.__dict__.update(**kwargs)\n+\n+ def __getitem__(self, key):\n+ return self.__dict__[key]\n+\n+ def __iter__(self):\n+ return iter(self.__dict__)\n+\n+ def __len__(self):\n+ return len(self.__dict__)\n+\n+\n+def test_package_from_mapping():\n+ package = Package(NotADict(name=\"name\", profile=\"data-package\"))\n+ assert package == {\n+ \"name\": \"name\",\n+ \"profile\": \"data-package\",\n+ }\n+\n+\ndef test_package_from_path():\npackage = Package(\"data/package.json\")\nassert package.name == \"name\"\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Allow Mapping instead of dict instance (#985)
Co-authored-by: roll <[email protected]> |
234,924 | 23.03.2022 22:01:04 | 0 | 35425305296f34f0ce902994151b2d7bbf0aca5b | Update validation-guide.md | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/validation-guide.md",
"new_path": "docs/guides/validation-guide.md",
"diff": "@@ -224,7 +224,7 @@ As we can see, the result is in a similar format to what we have already seen, a\n> The Inquiry is an advanced concept mostly used by software integrators. For example, under the hood, Frictionless Framework uses inquiries to implement client-server validation within the built-in API. Please skip this section if this information feels unnecessary for you.\n-Inquiry is a declarative representation of a validation job. It gives you an ability to create, export, ans share arbitrary validation jobs containing a set of individual validation tasks. Tasks in the Inquiry accept the same arguments written in camelCase as the corresponding `validate` functions.\n+Inquiry is a declarative representation of a validation job. It gives you an ability to create, export, and share arbitrary validation jobs containing a set of individual validation tasks. Tasks in the Inquiry accept the same arguments written in camelCase as the corresponding `validate` functions.\nLet's create an Inquiry that includes an individual file validation and a resource validation. In this example we will use the data file, `capital-valid.csv` and the resource, `capital.resource.json` which describes the invalid data file we have already seen:\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update validation-guide.md (#1006) |
234,912 | 19.04.2022 17:58:39 | -10,800 | 48ebe4a725b585b2d2a522e6a80676292384f0cb | Migrate to codecov-action@2 | [
{
"change_type": "MODIFY",
"old_path": ".github/workflows/general.yaml",
"new_path": ".github/workflows/general.yaml",
"diff": "@@ -41,7 +41,7 @@ jobs:\n- name: Test software\nrun: make test-ci\n- name: Report coverage\n- uses: codecov/codecov-action@v1\n+ uses: codecov/codecov-action@v2\nservices:\npostgres:\nimage: postgres:12\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Migrate to codecov-action@2 (#1037) |
234,912 | 19.04.2022 18:11:14 | -10,800 | f6704e22955a7b0f6a582e2b9f764d59fd1f1de0 | Improved DataWithErrorHandling's repr | [
{
"change_type": "MODIFY",
"old_path": "frictionless/transform/resource.py",
"new_path": "frictionless/transform/resource.py",
"diff": "@@ -81,6 +81,9 @@ class DataWithErrorHandling:\nself.data = data\nself.step = step\n+ def __repr__(self):\n+ return '<transformed-data>'\n+\ndef __iter__(self):\ntry:\nyield from self.data() if callable(self.data) else self.data\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improved DataWithErrorHandling's repr (#1038) |
234,915 | 21.04.2022 12:29:20 | -19,620 | 9461920c3405f048cfec9508093a1f35e36b1b49 | Added pprint to metadata
* added pretty print to metadata
* added tests for Field, Inquiry, Package, Pipeline, Resource, Report
and Schema | [
{
"change_type": "MODIFY",
"old_path": "frictionless/metadata.py",
"new_path": "frictionless/metadata.py",
"diff": "@@ -11,6 +11,7 @@ from importlib import import_module\nfrom .exception import FrictionlessException\nfrom .helpers import cached_property\nfrom . import helpers\n+import pprint as pp\n# NOTE:\n@@ -83,6 +84,10 @@ class Metadata(helpers.ControlledDict):\nif value is not None:\ndict.__setitem__(self, key, value)\n+ def __repr__(self) -> str:\n+ \"\"\"Returns string representation for metadata.\"\"\"\n+ return pp.pformat(self.to_dict())\n+\n# Expand\ndef expand(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_field.py",
"new_path": "tests/test_field.py",
"diff": "@@ -357,3 +357,12 @@ def test_field_set_schema():\ndef test_field_set_type():\nfield = Field(type=\"int\")\nassert field.type == \"int\"\n+\n+\n+# Issues\n+\n+\n+def test_field_pprint_1029():\n+ field = Field({\"name\": \"name\", \"type\": \"string\", \"constraints\": {\"maxLength\": 2}})\n+ expected = \"\"\"{'constraints': {'maxLength': 2}, 'name': 'name', 'type': 'string'}\"\"\"\n+ assert repr(field) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_inquiry.py",
"new_path": "tests/test_inquiry.py",
"diff": "@@ -19,3 +19,20 @@ def test_inquiry_with_task_class():\n)\nreport = inquiry.run()\nassert report.valid\n+\n+\n+# Issues\n+\n+\n+def test_inquiry_pprint_1029():\n+ inquiry = Inquiry(\n+ {\n+ \"tasks\": [\n+ {\"source\": \"data/capital-valid.csv\"},\n+ {\"source\": \"data/capital-invalid.csv\"},\n+ ]\n+ }\n+ )\n+ expected = \"\"\"{'tasks': [{'source': 'data/capital-valid.csv'},\n+ {'source': 'data/capital-invalid.csv'}]}\"\"\"\n+ assert repr(inquiry) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_metadata.py",
"new_path": "tests/test_metadata.py",
"diff": "from frictionless import Metadata\n-\n# General\n@@ -12,3 +11,27 @@ def test_descriptor():\ndef test_descriptor_from_path():\nmetadata = Metadata(\"data/schema-valid.json\")\nassert metadata[\"primaryKey\"] == \"id\"\n+\n+\n+# Issues\n+\n+\n+def test_metadata_pprint_1029():\n+ metadata = Metadata(\"data/schema-valid.json\")\n+ expected = \"\"\"{'fields': [{'constraints': {'required': True},\n+ 'description': 'The id.',\n+ 'name': 'id',\n+ 'title': 'ID',\n+ 'type': 'integer'},\n+ {'constraints': {'required': True},\n+ 'description': 'The name.',\n+ 'name': 'name',\n+ 'title': 'Name',\n+ 'type': 'string'},\n+ {'constraints': {'required': True},\n+ 'description': 'The age.',\n+ 'name': 'age',\n+ 'title': 'Age',\n+ 'type': 'integer'}],\n+ 'primaryKey': 'id'}\"\"\"\n+ assert repr(metadata) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_pipeline.py",
"new_path": "tests/test_pipeline.py",
"diff": "@@ -49,3 +49,28 @@ def test_pipeline_package():\n)\nstatus = pipeline.run()\nassert status.task.target.resource_names == [\"data\"]\n+\n+\n+# Issues\n+\n+\n+def test_pipeline_pprint_1029():\n+ pipeline = Pipeline(\n+ {\n+ \"tasks\": [\n+ {\n+ \"type\": \"resource\",\n+ \"source\": {\"path\": \"../data/transform.csv\"},\n+ \"steps\": [\n+ {\"code\": \"table-normalize\"},\n+ {\"code\": \"table-melt\", \"fieldName\": \"name\"},\n+ ],\n+ }\n+ ]\n+ }\n+ )\n+ expected = \"\"\"{'tasks': [{'source': {'path': '../data/transform.csv'},\n+ 'steps': [{'code': 'table-normalize'},\n+ {'code': 'table-melt', 'fieldName': 'name'}],\n+ 'type': 'resource'}]}\"\"\"\n+ assert repr(pipeline) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_report.py",
"new_path": "tests/test_report.py",
"diff": "@@ -100,3 +100,25 @@ def test_report_to_yaml_with_bytes_serialization_issue_836():\nreport = validate(source)\ndescriptor = report.to_yaml()\nassert \"binary\" not in descriptor\n+\n+\n+# Issues\n+\n+\n+def test_report_pprint_1029():\n+ report = validate(\n+ \"data/capital-invalid.csv\", pick_errors=[\"duplicate-label\"], time=None\n+ )\n+ expected = \"\"\"{'errors': [{'code': 'task-error',\n+ 'description': 'General task-level error.',\n+ 'message': 'The task has an error: __init__() got an unexpected '\n+ \"keyword argument 'time'\",\n+ 'name': 'Task Error',\n+ 'note': \"__init__() got an unexpected keyword argument 'time'\",\n+ 'tags': []}],\n+ 'stats': {'errors': 1, 'tasks': 0},\n+ 'tasks': [],\n+ 'time': 0.0,\n+ 'valid': False,\n+ 'version': '4.29.0'}\"\"\"\n+ assert repr(report) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_resource.py",
"new_path": "tests/test_resource.py",
"diff": "@@ -2748,3 +2748,17 @@ def test_resource_set_package():\ntest_package_2 = Package()\nresource.package = test_package_2\nassert resource.package == test_package_2\n+\n+\n+def test_resource_pprint_1029():\n+ resource = Resource(\n+ name=\"resource\",\n+ title=\"My Resource\",\n+ description=\"My Resource for the Guide\",\n+ path=\"data/table.csv\",\n+ )\n+ expected = \"\"\"{'description': 'My Resource for the Guide',\n+ 'name': 'resource',\n+ 'path': 'data/table.csv',\n+ 'title': 'My Resource'}\"\"\"\n+ assert repr(resource) == expected\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_schema.py",
"new_path": "tests/test_schema.py",
"diff": "@@ -467,3 +467,18 @@ def test_schema_not_supported_type_issue_goodatbles_304():\nschema = Schema({\"fields\": [{\"name\": \"name\"}, {\"name\": \"age\", \"type\": \"bad\"}]})\nassert schema.metadata_valid is False\nassert schema.fields[1] == {\"name\": \"age\", \"type\": \"bad\"}\n+\n+\n+def test_schema_pprint_1029():\n+ descriptor = {\n+ \"fields\": [\n+ {\"name\": \"test_1\", \"type\": \"string\", \"format\": \"default\"},\n+ {\"name\": \"test_2\", \"type\": \"string\", \"format\": \"default\"},\n+ {\"name\": \"test_3\", \"type\": \"string\", \"format\": \"default\"},\n+ ]\n+ }\n+ schema = Schema(descriptor)\n+ expected = \"\"\"{'fields': [{'format': 'default', 'name': 'test_1', 'type': 'string'},\n+ {'format': 'default', 'name': 'test_2', 'type': 'string'},\n+ {'format': 'default', 'name': 'test_3', 'type': 'string'}]}\"\"\"\n+ assert repr(schema) == expected\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added pprint to metadata (#1039)
* added pretty print to metadata
* added tests for Field, Inquiry, Package, Pipeline, Resource, Report
and Schema |
234,917 | 27.04.2022 10:25:04 | 18,000 | 254b72ecb57e43fb5df658169f02f7b630d13a24 | adds new tutorial
* adds new tutorial
...for workshop training at TU Delft
* add button
and fix typo
* fix badge, typo | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "docs/tutorials/notebooks/frictionless-RDM-workflows.ipynb",
"diff": "+{\n+ \"cells\": [\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"9ecb2099\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"[](https://colab.research.google.com/github/frictionlessdata/frictionless-py/blob/5d13a6db2757ca1db4a25693d7ead7087d50c7c0/docs/tutorials/notebooks/frictionless-RDM-workflows.ipynb#scrollTo=SmB2Y4_rfUUv)\\n\",\n+ \"\\n\",\n+ \"\\n\",\n+ \"## Purpose\\n\",\n+ \"To teach about using Frictionless to create a reproducible data workflow. You will start with a dataset that needs some more information, and will end with a data package that can be published!\\n\",\n+ \"\\n\",\n+ \"## Outline\\n\",\n+ \"- Start by looking at the dataset & discussing the metadata. Are there things we need to change with the dataset?\\n\",\n+ \"- Describe the data (infer metadata + a schema; edit the metadata + schema)\\n\",\n+ \"- Extract (read in the dataset according to the schema)\\n\",\n+ \"- Validate (check the data for errors)\\n\",\n+ \"- Transform (clean the data)\\n\",\n+ \"- Package (containerize the data + metadata/schema)\\n\",\n+ \"\\n\",\n+ \"## Resources\\n\",\n+ \"- Dataset: https://figshare.com/articles/dataset/Portal_Project_Teaching_Database/1314459?file=10717186\\n\",\n+ \"- Code documentation: https://framework.frictionlessdata.io/\\n\",\n+ \"- Frictionless website: https://frictionlessdata.io/\\n\",\n+ \"- Frictionless Slack if you want to join :-) https://frictionlessdata.slack.com/messages/general\\n\",\n+ \"- Jupyter Notebook intro: https://datacarpentry.org/python-ecology-lesson/jupyter_notebooks/\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"b3ec3d45\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"So, what are we going to do today? We just saw how this dataset could be improved by having the metadata more machine readable & more easily accessible. We'll be packaging the data, doing some light cleaning, and then getting the data ready to publish! \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"e1beb7ec\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!pip install frictionless\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"2b8bd535\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Describe\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"405fb4b1\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Discussion\\n\",\n+ \"\\n\",\n+ \"Let's start by looking at the dataset & discussing the metadata. Are there things we need to change with the dataset?\\n\",\n+ \"\\n\",\n+ \"(Everyone to open the data set to look at it). What kind of metadata is missing from this? (what info would we need to know to be able to use this data? eg units for weight, what does the species code mean? are there specific values for missing values? (note that the missing values are kind of interesting, but confusing))\\n\",\n+ \"\\n\",\n+ \"Can you find the metadata? https://esapubs.org/archive/ecol/E090/118/Portal_rodent_metadata.htm (Walk them through how I found the metadata from the software carpentries link, which has the data archive link (https://esapubs.org/archive/ecol/E090/118/), which has a metadata file!) \\n\",\n+ \"\\n\",\n+ \"Does it have the metadata we need? (e.g. units for weight)\\n\",\n+ \"\\n\",\n+ \"### Question: Is it easy to find the info you need in this file? 
Do you think it would be easy for a computer to parse/find info in this file?\\n\",\n+ \"\\n\",\n+ \"We'll now use Frictionless to add some of the missing metadata.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"dc538394\",\n+ \"metadata\": {\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from frictionless import describe # import these modules\\n\",\n+ \"from pprint import pprint # pretty print\\n\",\n+ \"\\n\",\n+ \"# describe is the function that reads in data and automatically infers metadata & a schema\\n\",\n+ \"resource = describe('combined.csv')\\n\",\n+ \"# NOTE: during intro, tell them to save this csv in the right place while showing jupyter\\n\",\n+ \"\\n\",\n+ \"# resource is Frictionless terminology for 'file'\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"4f888005\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"#let's look at our resource descriptor\\n\",\n+ \"pprint(resource)\\n\",\n+ \"# this is JSON...\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"d76400ac\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Questions:\\n\",\n+ \"- Look at the resource - what is it?\\n\",\n+ \"- What metadata has been automatically inferred?\\n\",\n+ \"- Is there other info that would be helpful to future researchers? let's add more metadata to it - manually\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"9d2a51d1\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# first we'll look at just the schema\\n\",\n+ \"resource.schema\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"bc570652\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"We can access specific fields from the schema to edit. Let's look at the hindfoot_length field.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"e24d7739\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"resource.schema.get_field('hindfoot_length')\\n\",\n+ \"# get_field is one way to access information inside the schema\\n\",\n+ \"# see more examples here https://framework.frictionlessdata.io/docs/guides/framework/schema-guide#field-management\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"1c9a3c51\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"What does hindfoot length mean? What are the units? Let's add some metadata here as a description to make this data more reusable in the future.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"7344ea68\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# now we can add a description to that field\\n\",\n+ \"resource.schema.get_field('hindfoot_length').description = \\\"Hindfoot length measured in millimeters\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"71b21585\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"pprint(resource.schema)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"0be1a681\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"We can also update other aspects of the schema, like missing values or constraints. 
Here's the full API for reference: https://framework.frictionlessdata.io/docs/references/api-reference#field\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"e42c8ba5\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### On your own\\n\",\n+ \"\\n\",\n+ \"What other descriptions should we update? Spend a few minutes updating the description for other columns using the metadata https://esapubs.org/archive/ecol/E090/118/Portal_rodent_metadata.htm\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"b7b2aa1e\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# things that Lilly will manipulate on screen while everyone else works on their own\\n\",\n+ \"resource.schema.get_field('weight').description = \\\"The weight in grams\\\"\\n\",\n+ \"resource.schema.get_field('plot_type').description = \\\"Describes the experimental condition\\\"\\n\",\n+ \"resource.schema.get_field('species_id').description = \\\"See table 2 https://esapubs.org/archive/ecol/E090/118/Portal_rodent_metadata.htm\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"145c6d9e\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"pprint(resource.schema)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"b2f4b7b8\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"(Question for helpers to discuss before the workshop: Should we put people in breakout rooms to work together or just give people like 7 to 10 min in the main room to work silently?)\\n\",\n+ \"\\n\",\n+ \"### Question: \\n\",\n+ \"Share what you edited (2 - 3 min)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"927d4040\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# now we'll save the resource descriptor\\n\",\n+ \"resource.to_yaml(\\\"resource.yaml\\\")\\n\",\n+ \"# you can also use JSON if you want with '.to_json'\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"bc4623a2\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Check out the saved YAML file in the Jupyter Notebook Home directory\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"2e882d9b\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Extract\\n\",\n+ \"\\n\",\n+ \"Now let's look at the Extract function. Extract reads in a data set, and can also manipulate the data in a few ways by forcing it to conform to a schema. 
To do this, we'll create a new schema in a resource descriptor and then extract the data from that descriptor.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"bc087ca4\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# first we will create a new descriptor called resource_string\\n\",\n+ \"# then we will change the schema so the data type for 'plot_id' is a string\\n\",\n+ \"# then we save this descriptor to a yaml file\\n\",\n+ \"from frictionless import extract\\n\",\n+ \"\\n\",\n+ \"resource_string = describe('combined.csv')\\n\",\n+ \"resource_string.schema.get_field('plot_id').type = \\\"string\\\"\\n\",\n+ \"resource_string.to_yaml(\\\"string_resource.yaml\\\")\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"1be207ed\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# now we will extract (aka read) the data inside the descriptor file\\n\",\n+ \"data = extract(\\\"string_resource.yaml\\\")\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"3e6448ac\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# let's take a look at the first few rows of the read-in dataset\\n\",\n+ \"# what do we see?\\n\",\n+ \"data[0:5]\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"4be027fb\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"You can see that plot_id is now a string.\\n\",\n+ \"\\n\",\n+ \"### Question: \\n\",\n+ \"So, what are some instances when you might want to do this type of manipulation? (examples: The data type was inferred incorrectly; You could replace missing values; you could read in only a few lines of the data; you can read just the headers; etc). \\n\",\n+ \"\\n\",\n+ \"More info: https://framework.frictionlessdata.io/docs/guides/extracting-data \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"61c6b840\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"DAY 1 DONE\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"f1479b71\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Validate\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"315cf003\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Take some time to talk about data validation: what it means, why it is important.\\n\",\n+ \"\\n\",\n+ \"What kinds of things can be validated? 
content + structure\\n\",\n+ \"\\n\",\n+ \"Examples of both...\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"0bba19b9\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from frictionless import validate\\n\",\n+ \"\\n\",\n+ \"# create a report variable to store the validation report \\n\",\n+ \"report = validate(resource)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"d1dac99a\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"pprint(report)\\n\",\n+ \"# look at the scope here to see all the built-in validation checks\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"b4f47e55\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Let's purposefully create an error now so we can see how the validation report changes.\\n\",\n+ \"\\n\",\n+ \"Make a change to the data file (eg remove a value & comma, or duplicate a header) & validate again\\n\",\n+ \"\\n\",\n+ \"Note that this time we are using the data file as the input this time, and frictionless is automatically inferring the metadata from that dataset.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"cf62ad19\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"report_invalid = validate('combined.csv')\\n\",\n+ \"# It isn't using the schema we have edited.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"cf7ea0f0\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"pprint(report_invalid)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"c99fb24f\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Question: \\n\",\n+ \"What has changed in the report? Is it what you expected?\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"e3a1f37a\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"We can also create data constraints that limit the *content* of the data.\\n\",\n+ \"https://specs.frictionlessdata.io/table-schema/#constraints\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"19f1b423\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"constrained_resource = describe('combined.csv')\\n\",\n+ \"constrained_resource.schema.get_field('sex').constraints[\\\"enum\\\"] = [\\\"M\\\"]\\n\",\n+ \"# this means that only values of \\\"M\\\" are acceptable for the \\\"sex\\\" column\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"bde6f2f5\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# save this descriptor\\n\",\n+ \"constrained_resource.to_yaml(\\\"constrained_resource.yaml\\\")\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"9f67c974\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# create a new validation report\\n\",\n+ \"report = validate(constrained_resource)\\n\",\n+ \"pprint(report)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"6db60762\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### On your own\\n\",\n+ \"\\n\",\n+ \"What other things can you validate for? Play around with the data and the schema to create validation errors!\\n\",\n+ \"We won't get into this today, but Frictionless also has a tool for continuous data validation, [Repository](https://repository.frictionlessdata.io/). 
A use case for Repository is if you host a dataset on GitHub, everytime that you push changes to that dataset, Repository will run validation checks via a GitHub action and will alert you if there are any errors.\\n\",\n+ \"(e.g. make sure record_id is unique)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"b64068a2\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# another example: record_id must be unique\\n\",\n+ \"# remember to change a record_id value to be a duplicate in the data file\\n\",\n+ \"constrained2_resource = describe('combined.csv')\\n\",\n+ \"constrained2_resource.schema.get_field('record_id').constraints[\\\"unique\\\"] = True\\n\",\n+ \"report = validate(constrained2_resource)\\n\",\n+ \"pprint(report)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"39c62e91\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# bonus example\\n\",\n+ \"# you can selectively print out parts of the report if you don't want it all\\n\",\n+ \"# https://framework.frictionlessdata.io/docs/guides/validation-guide#validation-report\\n\",\n+ \"report = validate(constrained2_resource, pick_errors=['unique-error'])\\n\",\n+ \"pprint(report.flatten([\\\"rowPosition\\\", \\\"fieldPosition\\\", \\\"code\\\", \\\"message\\\"]))\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"bb7df2af\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Question: \\n\",\n+ \"What else did you validate? Share with the group.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"f119b133\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Transform\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"317b063c\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Now we'll look at the Transform function, which transforms (or cleans) the data set and metadata too.\\n\",\n+ \"https://framework.frictionlessdata.io/docs/guides/transform-guide\\n\",\n+ \"\\n\",\n+ \"Let's look at the date columns. Having 3 separate columns for dates is not standard. (What is standard?) Let's combine those columns into a new column to be more standard. 
We'll keep the original columns (this is a best practice to keep the original columns).\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"25365185\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from frictionless import Resource, transform, steps\\n\",\n+ \"\\n\",\n+ \"# Define source resource\\n\",\n+ \"source = 'resource.yaml'\\n\",\n+ \"\\n\",\n+ \"# Apply transform steps\\n\",\n+ \"target = transform(\\n\",\n+ \" source,\\n\",\n+ \" steps=[\\n\",\n+ \" steps.field_add(name=\\\"date\\\", type=\\\"integer\\\", formula=\\\"year+'-'+month+'-'+day\\\"),\\n\",\n+ \" steps.field_update(name=\\\"date\\\", type=\\\"date\\\"),\\n\",\n+ \" ],\\n\",\n+ \")\\n\",\n+ \"\\n\",\n+ \"# the first step creates a new field that has year, month, and day combined\\n\",\n+ \"# the second step changes the data type from integer to date\\n\",\n+ \"# (we need to use this order so we can \\\"add\\\" the 3 column names)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"1dcfe40a\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# check out the new field in the schema\\n\",\n+ \"pprint(target.schema)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"04cab3cf\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# print out some of the data rows to check the new column \\n\",\n+ \"# note: this file is HUGE, so stop this cell from running forever with the STOP square button on the menu\\n\",\n+ \"pprint(target.read_rows())\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"0c896f6f\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Question: \\n\",\n+ \"What are some other things you might want to transform in this dataset?\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"b68d6580\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# check the validity \\n\",\n+ \"target_report = validate(target)\\n\",\n+ \"pprint(target_report)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"e453207d\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# write the data to a file called transformed.csv\\n\",\n+ \"# then we can look at the data file in whole\\n\",\n+ \"target.write('transformed.csv')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"6e2491b2\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# and we'll save the descriptor of this datafile too\\n\",\n+ \"target.to_yaml(\\\"transformed_descriptor.yaml\\\")\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"bd366c9f\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Now we can package it up!\\n\",\n+ \"\\n\",\n+ \"### Question: \\n\",\n+ \"Why do we want to package it? (note: we will have talked about this during the intro on day 1, so this will be a good reminder). We package it so we could have the metadata, schema + data all together in 1 file! 
\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"23592b1b\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from frictionless import Package\\n\",\n+ \"package = Package(resources=Resource(path='transformed.csv'), descriptor='transformed_descriptor.yaml') \\n\",\n+ \"# this package contains the data file + the descriptor file\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"9c2dc194\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# let's look at the package\\n\",\n+ \"pprint(package)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"bde44822\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# save the package\\n\",\n+ \"package.to_yaml('package.yaml')\\n\",\n+ \"# package.to_json('package.json') will save as JSON instead\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"7b8e43f2\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Now we can publish this machine-readable packaged (contained) information in 1 place.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"id\": \"374db83e\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Conclusion\\n\",\n+ \"\\n\",\n+ \"To recap, we started with a pretty clean data file that was missing some important metadata. To make the datafile more reusable, we added metadata with Frictionless Describe. Then we saw how we could use Extract to read in data according to a schema. Next, we validated the data set and metadata/schema to check for data content and structure errors. After that, we transformed the data to add a new, standardized date column. And finally, we packed the data and metadata/schema together so we can publish it!\\n\",\n+ \"\\n\",\n+ \"### Bonus Example\\n\",\n+ \"Dr. Katerina Drakoulaki will tell you all about her recent experience using the Frictionless Framework with Byzantine music data.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"id\": \"edb119e2\",\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": []\n+ }\n+ ],\n+ \"metadata\": {\n+ \"kernelspec\": {\n+ \"display_name\": \"Python 3 (ipykernel)\",\n+ \"language\": \"python\",\n+ \"name\": \"python3\"\n+ },\n+ \"language_info\": {\n+ \"codemirror_mode\": {\n+ \"name\": \"ipython\",\n+ \"version\": 3\n+ },\n+ \"file_extension\": \".py\",\n+ \"mimetype\": \"text/x-python\",\n+ \"name\": \"python\",\n+ \"nbconvert_exporter\": \"python\",\n+ \"pygments_lexer\": \"ipython3\",\n+ \"version\": \"3.8.1\"\n+ }\n+ },\n+ \"nbformat\": 4,\n+ \"nbformat_minor\": 5\n+}\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | adds new tutorial (#1054)
* adds new tutorial
...for workshop training at TU Delft
* add button
and fix typo
* fix badge, typo |
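A minimal sketch of the describe workflow the notebook above teaches, assuming frictionless v4 and a local `combined.csv` with the columns used in the workshop (file and field names taken from the notebook itself):

```python
# Describe a data file, annotate a field, and persist the metadata,
# as the workshop notebook does for the rodent survey data.
from frictionless import describe

resource = describe("combined.csv")
resource.schema.get_field("weight").description = "The weight in grams"
resource.to_yaml("resource.yaml")  # .to_json is also available
```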
234,915 | 02.05.2022 01:18:21 | 25,200 | d73b6190b9f09a48d83f011c0e7209f1219aea46 | Ignore openpyxl warnings
* fixed warnings in test_date
* Fixed warnings by replacing pytest.warns(None) with recwarn
* fixed warnings in test_datetime
* Fixed warning by replacing pytest.warns(None) with recwarn
* fixed warnings in test_time
Fixed warnings in test_time by replacing pytest.warns(None) with recwarn
* fixed warnings in test_excel | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/excel.py",
"new_path": "frictionless/plugins/excel.py",
"diff": "@@ -5,6 +5,7 @@ import atexit\nimport hashlib\nimport tempfile\nimport datetime\n+import warnings\nfrom itertools import chain\nfrom ..exception import FrictionlessException\nfrom ..metadata import Metadata\n@@ -211,6 +212,7 @@ class XlsxParser(Parser):\n# To fill merged cells we can't use read-only because\n# `sheet.merged_cell_ranges` is not available in this mode\ntry:\n+ warnings.filterwarnings(\"ignore\", category=UserWarning, module=\"openpyxl\")\nbook = openpyxl.load_workbook(\nself.loader.byte_stream,\nread_only=not dialect.fill_merged_cells,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/plugins/test_excel.py",
"new_path": "tests/plugins/test_excel.py",
"diff": "@@ -106,7 +106,6 @@ def test_xlsx_parser_adjust_floating_point_error():\nadjust_floating_point_error=True,\n)\nlayout = Layout(skip_fields=[\"<blank>\"])\n- with pytest.warns(UserWarning):\nwith Resource(source, dialect=dialect, layout=layout) as resource:\nassert resource.read_rows()[1].cells[2] == 274.66\n@@ -115,7 +114,6 @@ def test_xlsx_parser_adjust_floating_point_error_default():\nsource = \"data/adjust-floating-point-error.xlsx\"\ndialect = ExcelDialect(preserve_formatting=True)\nlayout = Layout(skip_fields=[\"<blank>\"])\n- with pytest.warns(UserWarning):\nwith Resource(source, dialect=dialect, layout=layout) as resource:\nassert resource.read_rows()[1].cells[2] == 274.65999999999997\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_resource.py",
"new_path": "tests/test_resource.py",
"diff": "@@ -2651,11 +2651,8 @@ def test_resource_skip_rows_non_string_cell_issue_320():\nsource = \"data/issue-320.xlsx\"\ndialect = ExcelDialect(fill_merged_cells=True)\nlayout = Layout(header_rows=[10, 11, 12])\n- with pytest.warns(UserWarning):\nwith Resource(source, dialect=dialect, layout=layout) as resource:\n- assert (\n- resource.header[7] == \"Current Population Analysed % of total county Pop\"\n- )\n+ assert resource.header[7] == \"Current Population Analysed % of total county Pop\"\ndef test_resource_skip_rows_non_string_cell_issue_322():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/types/test_date.py",
"new_path": "tests/types/test_date.py",
"diff": "@@ -40,10 +40,9 @@ from frictionless import Field\n(\"fmt:%d/%m/%y\", \"\", None),\n],\n)\n-def test_date_read_cell(format, source, target):\n- with pytest.warns(None) as recorded:\n+def test_date_read_cell(format, source, target, recwarn):\nfield = Field({\"name\": \"name\", \"type\": \"date\", \"format\": format})\ncell, notes = field.read_cell(source)\nassert cell == target\nif not format.startswith(\"fmt:\"):\n- assert recorded.list == []\n+ assert recwarn.list == []\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/types/test_datetime.py",
"new_path": "tests/types/test_datetime.py",
"diff": "@@ -54,10 +54,9 @@ from frictionless import Field\n(\"fmt:%d/%m/%y %H:%M\", \"\", None),\n],\n)\n-def test_datetime_read_cell(format, source, target):\n- with pytest.warns(None) as recorded:\n+def test_datetime_read_cell(format, source, target, recwarn):\nfield = Field({\"name\": \"name\", \"type\": \"datetime\", \"format\": format})\ncell, notes = field.read_cell(source)\nassert cell == target\nif not format.startswith(\"fmt:\"):\n- assert recorded.list == []\n+ assert recwarn.list == []\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/types/test_time.py",
"new_path": "tests/types/test_time.py",
"diff": "@@ -49,10 +49,9 @@ from frictionless import Field\n(\"fmt:%H:%M\", \"\", None),\n],\n)\n-def test_time_read_cell(format, source, target):\n- with pytest.warns(None) as recorded:\n+def test_time_read_cell(format, source, target, recwarn):\nfield = Field({\"name\": \"name\", \"type\": \"time\", \"format\": format})\ncell, notes = field.read_cell(source)\nassert cell == target\nif not format.startswith(\"fmt:\"):\n- assert recorded.list == []\n+ assert recwarn.list == []\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Ignore openpyxl warnings (#1056)
* fixed warnings in test_date
* Fixed warnings by replacing pytest.warns(None) with recwarn
* fixed warnings in test_datetime
* Fixed warning by replacing pytest.warns(None) with recwarn
* fixed warnings in test_time
Fixed warnings in test_time by replacing pytest.warns(None) with recwarn
* fixed warnings in test_excel
Co-authored-by: roll <[email protected]> |
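The parser change in this commit boils down to a single warnings filter. A condensed sketch of the technique, with a hypothetical `table.xlsx`:

```python
# Suppress openpyxl's UserWarnings before loading a workbook, mirroring
# the filter the diff adds inside XlsxParser.
import warnings

import openpyxl

warnings.filterwarnings("ignore", category=UserWarning, module="openpyxl")
book = openpyxl.load_workbook("table.xlsx", read_only=True)
```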
234,917 | 02.05.2022 20:09:21 | 18,000 | 04ba7db86b20914caa04e063e1551516a2da74fd | adds RDM tutorial link | [
{
"change_type": "MODIFY",
"old_path": "site/docusaurus.config.js",
"new_path": "site/docusaurus.config.js",
"diff": "@@ -104,6 +104,11 @@ module.exports = {\nhref:\n\"https://colab.research.google.com/github/frictionlessdata/frictionless-py/blob/main/docs/tutorials/notebooks/frictionless-excel.ipynb\",\n},\n+ {\n+ label: \"Frictionless Research Data Management Workflows\",\n+ href:\n+ \"https://colab.research.google.com/github/frictionlessdata/frictionless-py/blob/main/docs/tutorials/notebooks/frictionless-RDM-workflows.ipynb\",\n+ },\n],\n},\n{\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | adds RDM tutorial link (#1057) |
234,925 | 10.05.2022 12:22:29 | -3,600 | 9ca5abc02f8a79275488fe29af9ae30a7195c21b | Improve schema detection algorithm with missing values | [
{
"change_type": "MODIFY",
"old_path": "frictionless/detector.py",
"new_path": "frictionless/detector.py",
"diff": "@@ -446,21 +446,26 @@ class Detector:\n# Infer fields\nfields = [None] * len(names)\nmax_score = [len(fragment)] * len(names)\n- treshold = len(fragment) * (self.__field_confidence - 1)\n+ threshold = len(fragment) * (self.__field_confidence - 1)\nfor cells in fragment:\nfor index, name in enumerate(names):\nif fields[index] is not None:\ncontinue\nsource = cells[index] if len(cells) > index else None\n- if source in self.__field_missing_values:\n+ is_field_missing_value = source in self.__field_missing_values\n+ if is_field_missing_value:\nmax_score[index] -= 1\n- continue\n+\nfor runner in runners[index]:\n- if runner[\"score\"] < treshold:\n+ if runner[\"score\"] < threshold:\ncontinue\n+ if not is_field_missing_value:\ntarget, notes = runner[\"field\"].read_cell(source)\nrunner[\"score\"] += 1 if not notes else -1\n- if runner[\"score\"] >= max_score[index] * self.__field_confidence:\n+\n+ if max_score[index] > 0 and runner[\"score\"] >= (\n+ max_score[index] * self.__field_confidence\n+ ):\nfield = runner[\"field\"].to_copy()\nfield.name = name\nfield.schema = schema\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_detector.py",
"new_path": "tests/test_detector.py",
"diff": "@@ -61,6 +61,25 @@ def test_schema_from_sample_confidence_full():\n}\n+def test_schema_from_sparse_sample():\n+ labels = [\"id\", \"age\", \"name\"]\n+ sample = [\n+ [\"1\", \"39\", \"Paul\"],\n+ [\"2\", \"23\", \"Jimmy\"],\n+ [\"3\", \"\", \"Jane\"],\n+ [\"4\", \"\", \"Judy\"],\n+ ]\n+ detector = Detector(field_confidence=1)\n+ schema = detector.detect_schema(sample, labels=labels)\n+ assert schema == {\n+ \"fields\": [\n+ {\"name\": \"id\", \"type\": \"integer\"},\n+ {\"name\": \"age\", \"type\": \"integer\"},\n+ {\"name\": \"name\", \"type\": \"string\"},\n+ ],\n+ }\n+\n+\ndef test_schema_infer_no_names():\nsample = [[1], [2], [3]]\ndetector = Detector()\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Improve schema detection algorithm with missing values (#1051) |
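Condensed from the new `test_schema_from_sparse_sample` in the diff above: with `field_confidence=1`, a column whose later cells are missing values is still typed from the cells that do have values.

```python
# Sparse columns no longer defeat type detection: empty strings are the
# default missing values and are excluded from the confidence score.
from frictionless import Detector

sample = [["1", "39"], ["2", "23"], ["3", ""], ["4", ""]]
detector = Detector(field_confidence=1)
schema = detector.detect_schema(sample, labels=["id", "age"])
assert schema.get_field("age").type == "integer"
```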
234,915 | 16.05.2022 01:00:10 | 25,200 | 9d9f281462314f9753fb938ad550f94be7d52dab | Added true/false_values to detection class.
* Added true/false_values to detection class.
* Added tests to test true/false_values feature.
* Reduce computational complexity
Instead of setting values in two dimensional cycle
we can set them preliminarily | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/countries-truefalsevalues.csv",
"diff": "+id,value\n+1,yes\n+2,no\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/detector.py",
"new_path": "frictionless/detector.py",
"diff": "@@ -55,6 +55,14 @@ class Detector:\nFor more information, please check \"Describing Data\" guide.\nIt defaults to `['']`\n+ field_true_values? (str[]): String to be considered as true values.\n+ For more information, please check \"Describing Data\" guide.\n+ It defaults to `[\"true\", \"True\", \"TRUE\", \"1\"]`\n+\n+ field_false_values? (str[]): String to be considered as false values.\n+ For more information, please check \"Describing Data\" guide.\n+ It defaults to `[\"false\", \"False\", \"FALSE\", \"0\"]`\n+\nschema_sync? (bool): Whether to sync the schema.\nIf it sets to `True` the provided schema will be mapped to\nthe inferred schema. It means that, for example, you can\n@@ -79,6 +87,8 @@ class Detector:\nfield_confidence=settings.DEFAULT_FIELD_CONFIDENCE,\nfield_float_numbers=settings.DEFAULT_FLOAT_NUMBERS,\nfield_missing_values=settings.DEFAULT_MISSING_VALUES,\n+ field_true_values=settings.DEFAULT_TRUE_VALUES,\n+ field_false_values=settings.DEFAULT_FALSE_VALUES,\nschema_sync=False,\nschema_patch=None,\n):\n@@ -91,6 +101,8 @@ class Detector:\nself.__field_confidence = field_confidence\nself.__field_float_numbers = field_float_numbers\nself.__field_missing_values = field_missing_values\n+ self.__field_true_values = field_true_values\n+ self.__field_false_values = field_false_values\nself.__schema_sync = schema_sync\nself.__schema_patch = schema_patch\n@@ -257,6 +269,42 @@ class Detector:\n\"\"\"\nself.__field_missing_values = value\n+ @property\n+ def field_true_values(self) -> List[str]:\n+ \"\"\"Returns detector fields true values list.\n+\n+ Returns:\n+ str[]: detector fields true values list\n+ \"\"\"\n+ return self.__field_true_values\n+\n+ @field_true_values.setter\n+ def field_true_values(self, value: List[str]):\n+ \"\"\"Sets detector fields true values list.\n+\n+ Parameters:\n+ value (str[]): detector fields true values list\n+ \"\"\"\n+ self.__field_true_values = value\n+\n+ @property\n+ def field_false_values(self) -> List[str]:\n+ \"\"\"Returns detector fields false values list.\n+\n+ Returns:\n+ str[]: detector fields false values list\n+ \"\"\"\n+ return self.__field_false_values\n+\n+ @field_false_values.setter\n+ def field_false_values(self, value: List[str]):\n+ \"\"\"Sets detector fields false values list.\n+\n+ Parameters:\n+ value (str[]): detector fields false values list\n+ \"\"\"\n+ self.__field_false_values = value\n+\n@property\ndef schema_sync(self) -> bool:\n\"\"\"Returns detector schema_sync flag value.\n@@ -437,6 +485,11 @@ class Detector:\nfield = Field(candidate)\nif field.type == \"number\" and self.__field_float_numbers:\nfield.float_number = True\n+ elif field.type == \"boolean\":\n+ if self.__field_true_values != settings.DEFAULT_TRUE_VALUES:\n+ field.true_values = self.__field_true_values\n+ if self.__field_false_values != settings.DEFAULT_FALSE_VALUES:\n+ field.false_values = self.__field_false_values\nrunner_fields.append(field)\nfor index, name in enumerate(names):\nrunners.append([])\n@@ -455,14 +508,12 @@ class Detector:\nis_field_missing_value = source in self.__field_missing_values\nif is_field_missing_value:\nmax_score[index] -= 1\n-\nfor runner in runners[index]:\nif runner[\"score\"] < threshold:\ncontinue\nif not is_field_missing_value:\ntarget, notes = runner[\"field\"].read_cell(source)\nrunner[\"score\"] += 1 if not notes else -1\n-\nif max_score[index] > 0 and runner[\"score\"] >= (\nmax_score[index] * self.__field_confidence\n):\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/field.py",
"new_path": "frictionless/field.py",
"diff": "@@ -31,6 +31,8 @@ class Field(Metadata):\nmissing_values? (str[]): missing values\nconstraints? (dict): constraints\nrdf_type? (str): RDF type\n+ true_values? (str[]): true values\n+ false_values? (str[]): false values\nschema? (Schema): parent schema object\nRaises:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_detector.py",
"new_path": "tests/test_detector.py",
"diff": "-from frictionless import Detector\n-\n+from frictionless import Detector, Resource\n# General\n@@ -166,3 +165,16 @@ def test_detector_set_schema_patch():\nassert detector.schema_patch == {\"fields\": {\"id\": {\"type\": \"string\"}}}\ndetector.schema_patch = {\"fields\": {\"age\": {\"type\": \"int\"}}}\nassert detector.schema_patch == {\"fields\": {\"age\": {\"type\": \"int\"}}}\n+\n+\n+def test_detector_true_false_values():\n+ detector = Detector(field_true_values=[\"yes\"], field_false_values=[\"no\"])\n+ with Resource(\n+ \"data/countries-truefalsevalues.csv\",\n+ detector=detector,\n+ ) as resource:\n+ assert resource.schema.get_field(\"value\").type == \"boolean\"\n+ assert resource.read_rows() == [\n+ {\"id\": 1, \"value\": True},\n+ {\"id\": 2, \"value\": False},\n+ ]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added true/false_values to detection class. (#1074)
* * Added true/false_values to detection class.
* Added tests to test true/false_values feature.
* Reduce computational complexity
Instead of setting values in two dimensional cycle
we can set them perliminary
Co-authored-by: roll <[email protected]>
Co-authored-by: roll <[email protected]> |
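Usage mirroring the new `test_detector_true_false_values` above (the fixture CSV is added by the same commit):

```python
# Custom boolean markers passed to the detector make "yes"/"no" cells
# detectable and readable as booleans.
from frictionless import Detector, Resource

detector = Detector(field_true_values=["yes"], field_false_values=["no"])
with Resource("data/countries-truefalsevalues.csv", detector=detector) as resource:
    assert resource.schema.get_field("value").type == "boolean"
    assert resource.read_rows() == [{"id": 1, "value": True}, {"id": 2, "value": False}]
```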
234,915 | 17.05.2022 00:19:29 | 25,200 | 9e194fcc07024707f35fdd3c77d757fd2f045dc4 | Added more text explanation to the tutorial | [
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/notebooks/tutorial-markdown-export-feature.ipynb",
"new_path": "docs/tutorials/notebooks/tutorial-markdown-export-feature.ipynb",
"diff": "\"source\": [\n\"# Get Data: Land usage by country\\n\",\n\"\\n\",\n- \"We will use the landuse data by country: https://en.wikipedia.org/wiki/Land_use_statistics_by_country\"\n+ \"We will use the landuse data by country: https://en.wikipedia.org/wiki/Land_use_statistics_by_country . The following code parses the html data using '**.wikitable.sortable**' selector and returns the resource object. \\n\",\n+ \"\\n\",\n+ \"to_view() property of resource object displays the data in table view.\"\n]\n},\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"**Transform to package**\"\n+ \"**Transform to package**\\n\",\n+ \"\\n\",\n+ \"We will use the transform feature of frictionless to further describe our metadata using additional properties: '**type**' and '**description**' . \"\n]\n},\n{\n\")\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Here we package our resource into a container named \\\"farmers\\\" which is a frictionless Package class and gave a description to it. They are metadata for package. \\n\",\n+ \"\\n\",\n+ \"In this example, we have only one resource but Package can be used to combine multiple resources. \"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 6,\n\"# Pretty print metadata\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Now you do not have to use pprint to format the metadata!. All the metadata are automatically converted to pretty print format which displays the metadata in readable and pretty way.\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 7,\n\"# Get Markdown\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Metadata now supports conversion to markdown format using the new feature '**to_markdown**.'\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 9,\n\"# Export table to excel\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"The schema metadata can now be exported to excel using function '**to_excel_template**'\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 12,\n\"# Excel Stats\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"In excel stats we can now see additional information about excel: \\\"hash\\\" and \\\"bytes\\\". Lets open an excel sheet that we created in above step using **to_excel_template**. \\n\",\n+ \"\\n\",\n+ \"And add few data to it.\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 13,\n\"fertilizer_producers_wb.save(filename='fertilizer_producers.xlsx')\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Now we can then use '**.infer**' and '**.stats**' property to view information of the excel file. Stats now includes hash and bytes data of excel sheet as shown in the example\"\n+ ]\n+ },\n{\n\"cell_type\": \"code\",\n\"execution_count\": 14,\n\"name\": \"stdout\",\n\"output_type\": \"stream\",\n\"text\": [\n- \"{'hash': '49ca5e7ec006172f29d379a4aeb3a1dd', 'bytes': 6305, 'fields': 2, 'rows': 2}\\n\"\n+ \"{'hash': '1d3f7cfb0efa4c0bfc7f702739ded4a5', 'bytes': 6307, 'fields': 2, 'rows': 2}\\n\"\n]\n}\n],\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added more text explanation to the tutorial (#1071)
Co-authored-by: Lilly Winfree <[email protected]> |
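A short sketch of the export helpers this tutorial text walks through (`to_markdown` and `to_excel_template`); the file name here is hypothetical:

```python
# Render a schema as Markdown and export an Excel template for it,
# assuming frictionless v4 and a local table.csv.
from frictionless import describe

resource = describe("table.csv")
print(resource.schema.to_markdown())              # Markdown rendering of the schema
resource.schema.to_excel_template("schema.xlsx")  # Excel template export
```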
234,904 | 24.05.2022 04:08:27 | 10,800 | 5e250029ba89220c9a95418c07fa975706f50c20 | Fix import in Pandas tutorial
the `Package` class was imported, but the `Resource` class was being used. | [
{
"change_type": "MODIFY",
"old_path": "docs/tutorials/formats/pandas-tutorial.md",
"new_path": "docs/tutorials/formats/pandas-tutorial.md",
"diff": "@@ -17,7 +17,7 @@ pip install 'frictionless[pandas]' # for zsh shell\nYou can read a Pandas dataframe:\n```python title=\"Python\"\n-from frictionless import Package\n+from frictionless import Resource\nresource = Resource(df)\npprint(resource.read_rows())\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fix import in Pandas tutorial (#1092)
the `Package` class was imported, but the `Resource` class was being used. |
234,893 | 30.05.2022 16:16:31 | -7,200 | deed41df1da3e1ed21fe5cbb681bee9e25370818 | Add frictionless logo to be used in open graph image | [
{
"change_type": "MODIFY",
"old_path": "site/docusaurus.config.js",
"new_path": "site/docusaurus.config.js",
"diff": "@@ -50,7 +50,7 @@ module.exports = {\n],\nthemeConfig: {\nsidebarCollapsible: true,\n- image: \"img/docusaurus.png\",\n+ image: \"img/logo-bright2.png\",\nprism: {\ntheme: require(\"prism-react-renderer/themes/github\"),\ndarkTheme: require(\"prism-react-renderer/themes/dracula\"),\n"
},
{
"change_type": "ADD",
"old_path": "site/static/img/logo-bright2.png",
"new_path": "site/static/img/logo-bright2.png",
"diff": "Binary files /dev/null and b/site/static/img/logo-bright2.png differ\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | dd frictionless logo to be used in open graph image (#1096) |
234,904 | 30.05.2022 11:18:44 | 10,800 | 62346c0985fca4ddee41a1f09b748376fa83e2ea | Add a new compression argument to `to_zip`
* Add a new compression argument to to_zip
Fix by implementing the solution suggested there
* Add compression argument to dosctring | [
{
"change_type": "MODIFY",
"old_path": "frictionless/package/package.py",
"new_path": "frictionless/package/package.py",
"diff": "@@ -602,18 +602,21 @@ class Package(Metadata):\n\"\"\"\nreturn Package(descriptor=path, **options)\n- def to_zip(self, path, *, encoder_class=None):\n+ def to_zip(self, path, *, encoder_class=None, compression=zipfile.ZIP_DEFLATED):\n\"\"\"Save package to a zip\nParameters:\npath (str): target path\nencoder_class (object): json encoder class\n+ compression (int): the ZIP compression method to use when\n+ writing the archive. Possible values are the ones supported\n+ by Python's `zipfile` module.\nRaises:\nFrictionlessException: on any error\n\"\"\"\ntry:\n- with zipfile.ZipFile(path, \"w\") as archive:\n+ with zipfile.ZipFile(path, \"w\", compression=compression) as archive:\npackage_descriptor = self.to_dict()\nfor index, resource in enumerate(self.resources):\ndescriptor = package_descriptor[\"resources\"][index]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Add a new compression argument to `to_zip` (#1104)
* Add a new compression argument to to_zip
Fix #1103 by implemented the solution suggested there
* Add compression argument to dosctring |
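Example of the new keyword argument; `datapackage.json` is a hypothetical descriptor, and `ZIP_STORED` simply disables compression — any constant supported by Python's `zipfile` module should work, per the diff:

```python
# Save a package to a zip archive with an explicit compression method.
import zipfile

from frictionless import Package

package = Package("datapackage.json")
package.to_zip("package.zip", compression=zipfile.ZIP_STORED)
```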
234,904 | 09.06.2022 02:32:52 | 10,800 | b25c896cfa2c0ffa16cac84b0e10e0c17af4f989 | Replace references to deprecated functions
fixes | [
{
"change_type": "MODIFY",
"old_path": "docs/guides/describing-data.md",
"new_path": "docs/guides/describing-data.md",
"diff": "@@ -32,7 +32,7 @@ Now that we have a general understanding of what \"describing data\" is, we can di\n- **data validation**: metadata helps to reveal problems in your data during early stages of your workflow\n- **data publication**: metadata provides additional information that your data doesn't include\n-These are not the only positives of having metadata, but they are two of the most important. Please continue reading to learn how Frictionless helps to achieve these advantages by describing your data. This guide will discuss the main `describe` functions (`describe`, `describe_schema`, `describe_resource`, `describe_package`) and will then go into more detail about how to create and edit metadata in Frictionless.\n+These are not the only positives of having metadata, but they are two of the most important. Please continue reading to learn how Frictionless helps to achieve these advantages by describing your data. This guide will discuss the main `describe` functions (`describe`, `Schema.describe`, `Resource.describe`, `Package.describe`) and will then go into more detail about how to create and edit metadata in Frictionless.\nFor the following examples, you will need to have Frictionless installed. See our [Quick Start Guide](https://framework.frictionlessdata.io/docs/guides/quick-start) if you need help.\n@@ -46,9 +46,9 @@ The `describe` functions are the main Frictionless tool for describing data. In\nThe frictionless framework provides 4 different `describe` functions in Python:\n- `describe`: detects the source type and returns Data Resource or Data Package metadata\n-- `describe_schema`: always returns Table Schema metadata\n-- `describe_resource`: always returns Data Resource metadata\n-- `describe_package`: always returns Data Package metadata\n+- `Schema.describe`: always returns Table Schema metadata\n+- `Resource.describe`: always returns Data Resource metadata\n+- `Package.describe`: always returns Data Package metadata\nAs described in more detail in the [Introduction](https://framework.frictionlessdata.io/docs/guides/introduction), a resource is a single file, such as a data file, and a package is a set of files, such as a data file and a schema.\n@@ -115,9 +115,9 @@ id,neighbor_id,name,population\nLet's get a Table Schema using the Frictionless framework (note: this example uses YAML for the schema format, but Frictionless also supports JSON format):\n```python script title=\"Python\"\n-from frictionless import describe_schema\n+from frictionless import Schema\n-schema = describe_schema(\"country-1.csv\")\n+schema = Schema.describe(\"country-1.csv\")\nschema.to_yaml(\"country.schema.yaml\") # use schema.to_json for JSON\n```\n@@ -141,9 +141,9 @@ fields:\nAs we can see, we were able to infer basic metadata from our data file. But describing data doesn't end here - we can provide additional information that we discussed earlier:\n```python script title=\"Python\"\n-from frictionless import describe_schema\n+from frictionless import Schema\n-schema = describe_schema(\"country-1.csv\")\n+schema = Schema.describe(\"country-1.csv\")\nschema.get_field(\"id\").title = \"Identifier\"\nschema.get_field(\"neighbor_id\").title = \"Identifier of the neighbor\"\nschema.get_field(\"name\").title = \"Name of the country\"\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Replace references to deprecated functions (#1129)
fixes #1128 |
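The replacement pattern the docs change spells out, condensed (file name as used in the guide): class-level `describe` methods instead of the removed module-level helpers.

```python
# Schema.describe replaces the deprecated describe_schema function.
from frictionless import Schema

schema = Schema.describe("country-1.csv")
schema.get_field("id").title = "Identifier"
schema.to_yaml("country.schema.yaml")
```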
234,915 | 27.06.2022 01:11:04 | 25,200 | 7dbdbf16efd7d3572597096600ca7dd1cd5bfeb3 | Fixes to_pandas conversion without types
Replaced field type check using Frictionless Field property instead of dict keys
Added tests | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/pandas/parser.py",
"new_path": "frictionless/plugins/pandas/parser.py",
"diff": "@@ -176,11 +176,11 @@ class PandasParser(Parser):\n# Bug: #1109\nfor field in source.schema.fields:\nif (\n- field[\"type\"] == \"integer\"\n- and field[\"name\"] in dataframe.columns\n- and str(dataframe.dtypes[field[\"name\"]]) != \"int64\"\n+ field.type == \"integer\"\n+ and field.name in dataframe.columns\n+ and str(dataframe.dtypes[field.name]) != \"int64\"\n):\n- dataframe[field[\"name\"]] = dataframe[field[\"name\"]].astype(\"Int64\")\n+ dataframe[field.name] = dataframe[field.name].astype(\"Int64\")\ntarget.data = dataframe\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/plugins/pandas/test_parser.py",
"new_path": "tests/plugins/pandas/test_parser.py",
"diff": "@@ -74,6 +74,42 @@ def test_pandas_nan_in_integer_csv_column():\nassert all(df.dtypes.values == pd.array([pd.Int64Dtype(), float, object]))\n+def test_pandas_nan_with_field_type_information_1143():\n+ descriptor = {\n+ \"dialect\": {\"delimiter\": \",\"},\n+ \"name\": \"issue-1109\",\n+ \"path\": \"data/issue-1109.csv\",\n+ \"schema\": {\n+ \"fields\": [\n+ {\"name\": \"int\", \"type\": \"integer\"},\n+ {\"name\": \"number\", \"type\": \"number\"},\n+ {\"name\": \"string\", \"type\": \"string\"},\n+ ]\n+ },\n+ }\n+ res = Resource(descriptor)\n+ df = res.to_pandas()\n+ assert all(df.dtypes.values == pd.array([pd.Int64Dtype(), float, object]))\n+\n+\n+def test_pandas_nan_without_field_type_information_1143():\n+ descriptor = {\n+ \"dialect\": {\"delimiter\": \",\"},\n+ \"name\": \"issue-1109\",\n+ \"path\": \"data/issue-1109.csv\",\n+ \"schema\": {\n+ \"fields\": [\n+ {\"name\": \"int\"},\n+ {\"name\": \"number\"},\n+ {\"name\": \"string\"},\n+ ]\n+ },\n+ }\n+ res = Resource(descriptor)\n+ df = res.to_pandas()\n+ assert all(df.dtypes.values == pd.array([object, object, object]))\n+\n+\ndef test_pandas_parser_write_types():\nsource = Package(\"data/storage/types.json\").get_resource(\"types\")\ntarget = source.write(format=\"pandas\")\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixes to_pandas conversion without types (#1149)
Replaced field type check using Frictionless Field property instead of dict keys
Added tests |
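Condensed from `test_pandas_nan_with_field_type_information_1143` above: with explicit field types, an integer column containing missing values is exported as pandas' nullable `Int64` rather than float.

```python
# Export a resource with declared field types and check the integer dtype.
from frictionless import Resource

descriptor = {
    "path": "data/issue-1109.csv",
    "schema": {
        "fields": [
            {"name": "int", "type": "integer"},
            {"name": "number", "type": "number"},
            {"name": "string", "type": "string"},
        ]
    },
}
df = Resource(descriptor).to_pandas()
assert str(df.dtypes["int"]) == "Int64"  # nullable integer, not float
```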
234,915 | 08.07.2022 12:31:02 | -19,620 | 1aff5be21c6a04a85bcbf45630a6de36a4ab1a11 | Fixed parser to create datetime64 for date columns | [
{
"change_type": "MODIFY",
"old_path": "frictionless/plugins/pandas/parser.py",
"new_path": "frictionless/plugins/pandas/parser.py",
"diff": "@@ -174,6 +174,7 @@ class PandasParser(Parser):\n# If the column is of type float instead of integer, convert it to the type\n# Int64 from pandas that supports NaN.\n# Bug: #1109\n+ # Bug: #1138 create datetime64 for date columns\nfor field in source.schema.fields:\nif (\nfield.type == \"integer\"\n@@ -182,6 +183,13 @@ class PandasParser(Parser):\n):\ndataframe[field.name] = dataframe[field.name].astype(\"Int64\")\n+ if (\n+ field.type == \"date\"\n+ and field.name in dataframe.columns\n+ and str(dataframe.dtypes[field.name]) != \"date\"\n+ ):\n+ dataframe[field.name] = pd.to_datetime(dataframe[field.name])\n+\ntarget.data = dataframe\ndef __write_convert_type(self, type=None):\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed parser to create datetim64 for date columns (#1165) |
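A sketch of the behavior this patch adds: a date-typed field comes back as a pandas `datetime64` column via `pd.to_datetime`. The file and field names here are hypothetical.

```python
# Date fields are converted to datetime64 when exporting to pandas.
from frictionless import Resource

res = Resource("events.csv", schema={"fields": [{"name": "day", "type": "date"}]})
df = res.to_pandas()
print(df.dtypes["day"])  # datetime64[ns]
```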
234,892 | 25.08.2022 10:55:15 | -7,200 | c57e2a54c267504e83d9893a73dbd92b9dc8d9dc | Update 08-22-frictionless-framework-v5.md
Correcting a couple of typos | [
{
"change_type": "MODIFY",
"old_path": "blog/2022/08-22-frictionless-framework-v5.md",
"new_path": "blog/2022/08-22-frictionless-framework-v5.md",
"diff": "@@ -15,11 +15,11 @@ Since the initial Frictionless Framework release we'd been collecting feedback a\n## Improved Metadata\n-This year we started working on Frictionless Application, at the same time, as were thinking about next steps for [Frictionless Standards](https://specs.frictionlessdata.io/). For both we need well-defined and easy-to-understand metadata model as paritally it's already published as standards like Table Schema and partially it's going to be publised as standards like File Dialect and possibly validation/transform metadata.\n+This year we started working on the Frictionless Application, at the same time, we were thinking about next steps for the [Frictionless Standards](https://specs.frictionlessdata.io/). For both we need well-defined and an easy-to-understand metadata model. Partially it's already published as standards like Table Schema and partially it's going to be publised as standards like File Dialect and possibly validation/transform metadata.\n### Dialect\n-In v4 of the framework we had Control/Dialect/Layout concpets to describe resource details related to different formats and schemes, as well as tabular details like header rows. In v5 it's merged into the only one concept called Dialect which is going to be standartised as File Dialect spec. Here is an example:\n+In v4 of the framework we had Control/Dialect/Layout concepts to describe resource details related to different formats and schemes, as well as tabular details like header rows. In v5 it's merged into the only one concept called Dialect which is going to be standartised as a File Dialect spec. Here is an example:\n```yaml tabs=YAML\nheader: true\n@@ -51,7 +51,7 @@ dialect = Dialect.from_descriptor({\"header\": True, \"delimiter\": ';'})\nprint(dialect)\n```\n-For performance and codebase maintainability reasons some marginal Layout features have been removed completely such as `skip/pick/limit/offsetFields/etc`. It's possible to achieve the same results using the Pipeline concept as a part of transformation workflow.\n+For performance and codebase maintainability reasons some marginal Layout features have been removed completely such as `skip/pick/limit/offsetFields/etc`. It's possible to achieve the same results using the Pipeline concept as a part of the transformation workflow.\nRead an article about [Dialect Class](../../docs/framework/dialect.html) for more information.\n@@ -148,7 +148,7 @@ checklist: checklist.yaml\npipeline: pipeline.yaml\n```\n-In this case the validation/transformation will use it by default providing an ability to ship validation rules and transformation pipelines within resources and packages.This is an important development for data publishers who wants to define what they consider to be valid for their datasets as well as sharing a raw data with a cleaning pipeline steps:\n+In this case the validation/transformation will use it by default providing an ability to ship validation rules and transformation pipelines within resources and packages. This is an important development for data publishers who want to define what they consider to be valid for their datasets as well as sharing raw data with a cleaning pipeline steps:\n```bash tabs=CLI\nfrictionless validate resource.yaml # will use the checklist above\n@@ -219,7 +219,7 @@ Read an article about [Inquiry Class](../../docs/framework/inquiry.html) for mor\n### Report\n-The Report concpet has been significantly simplified by removing the `resource` property from `reportTask`. 
It's been replaced by `name/type/place` properties. Also `report.time` is now `report.stats.seconds`. The `report/reportTask.warnings: List[str]` have been added to provide non-error information like reached limits:\n+The Report concept has been significantly simplified by removing the `resource` property from `reportTask`. It's been replaced by `name/type/place` properties. Also `report.time` is now `report.stats.seconds`. The `report/reportTask.warnings: List[str]` have been added to provide non-error information like reached limits:\n```bash script tabs=CLI output=yaml\nfrictionless validate table.csv --yaml\n@@ -253,7 +253,7 @@ The new v5 version still supports old notation in descriptors for backward-compa\n## Improved Model\n-It's been many years when Frictionless were mixing declarative metadata and object model for historical reasons. Since the first implementation of `datapackage` library we used different approaches to sync internal state provide both interfaces descriptor and object model. In Frictionless Framework v4 this technique had been taken to a really sofisticated level with special observables dictionary classes. It was quite smart and nice-to-use for quick prototyping in REPL but it was really hard to maintain and error-prone.\n+It's been many years that Frictionless were mixing declarative metadata and object model for historical reasons. Since the first implementation of `datapackage` library we used different approaches to sync internal state to provide both interfaces descriptor and object model. In Frictionless Framework v4 this technique had been taken to a really sofisticated level with special observables dictionary classes. It was quite smart and nice-to-use for quick prototyping in REPL but it was really hard to maintain and error-prone.\nIn Framework v5 we finally decided to follow the \"right way\" for handling this problem and split descriptors and object model completely.\n@@ -271,7 +271,7 @@ fields:\n### Object Model\n-The difference comes here we we create a metadata instance based on this descriptor. In v4 all the metadata classes were a sublasses of the dict class providing a mix between a descriptor and object model for state management. In v5 there is a clear boundary between descriptor and object model. All the state are managed as it should be in a normal Python class using class attributes:\n+The difference comes here we we create a metadata instance based on this descriptor. In v4 all the metadata classes were a subclasses of the dict class providing a mix between a descriptor and object model for state management. In v5 there is a clear boundary between descriptor and object model. All the state are managed as it should be in a normal Python class using class attributes:\n```python tabs=Python\nfrom frictionless import Schema\n@@ -289,7 +289,7 @@ There are a few important traits of the new model:\n- it's not possible to mix dicts and classes in methods like `package.add_resource`\n- it's not possible to export an invalid descriptor\n-This separation might make one to add a few additional lines of code but it gives us much less fragile programs in the end. It's especially important for software integrators who want to be sure that they write working code. 
At the same time, for quick prototyping and discovery Frictionless still provides high-level actions like `validate` function that are more forgiving regarding user input.\n+This separation might make one to add a few additional lines of code, but it gives us much less fragile programs in the end. It's especially important for software integrators who want to be sure that they write working code. At the same time, for quick prototyping and discovery Frictionless still provides high-level actions like `validate` function that are more forgiving regarding user input.\n### Static Typing\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Update 08-22-frictionless-framework-v5.md (#1236)
Correcting a couple of typos |
234,915 | 29.08.2022 11:41:24 | -19,620 | 426c9e712fce91b84ecd0586f3eb4b8b5b83f8f1 | Added tests for multiple package errors | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/multiple-errors.package.json",
"diff": "+{\n+ \"name\": \"test-tabulator\",\n+ \"created\" : \"test\",\n+ \"resources\": [\n+ { \"name\": \"first-resource\",\n+ \"path\": \"table.xls\",\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"id\",\n+ \"type\": \"number\"\n+ },\n+ {\n+ \"name\": \"name\",\n+ \"type\": \"string\"\n+ }\n+ ]\n+ }\n+ },\n+ {\"name\": \"number-two\", \"path\": \"table-reverse.csv\",\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"id\",\n+ \"type\": \"integer\"\n+ },\n+ {\n+ \"name\": \"name\",\n+ \"type\": \"string\"\n+ }\n+ ]\n+ }\n+ },\n+ {\"name\": \"number-two\", \"path\": \"table-reversee.csv\",\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"id\",\n+ \"type\": \"integer\"\n+ },\n+ {\n+ \"name\": \"name\",\n+ \"type\": \"string\"\n+ }\n+ ]\n+ }\n+ }\n+ ]\n+ }\n+\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/actions/validate/test_package.py",
"new_path": "tests/actions/validate/test_package.py",
"diff": "@@ -151,6 +151,20 @@ def test_validate_package_with_schema_as_string():\nassert report.valid\n+def test_validate_package_multiple_package_errors():\n+ report = validate(\"data/multiple-errors.package.json\")\n+ assert report.flatten([\"type\", \"message\"]) == [\n+ [\n+ \"package-error\",\n+ \"The data package has an error: names of the resources are not unique\",\n+ ],\n+ [\n+ \"package-error\",\n+ 'The data package has an error: property \"created\" is not valid \"datetime\"',\n+ ],\n+ ]\n+\n+\n# Schema\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Added tests to test multiple package errors (#1241) |
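Running the new fixture through validation, as the test does — both package-level errors (duplicate resource names and the invalid `created` datetime) are reported instead of only the first one encountered:

```python
# Validate the fixture and list every package error found.
from frictionless import validate

report = validate("data/multiple-errors.package.json")
for error_type, message in report.flatten(["type", "message"]):
    print(error_type, message)  # two package-error entries
```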
234,922 | 05.09.2022 02:28:58 | 14,400 | b2ea24118965ee6bbcd7a745ce43e37d44bd92a6 | Replace old Discord links with Slack links
* Update README.md
Removes the deprecated Discord link in favor of Slack.
* Update livemark.yaml
Removes the deprecated Discord link in favor of Slack | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "[](https://pypi.python.org/pypi/frictionless)\n[](https://zenodo.org/badge/latestdoi/28409905)\n[](https://github.com/frictionlessdata/frictionless-py)\n-[](https://discordapp.com/invite/Sewv6av)\n+[](https://join.slack.com/t/frictionlessdata/shared_invite/zt-17kpbffnm-tRfDW_wJgOw8tJVLvZTrBg)\nData management framework for Python that provides functionality to describe, extract, validate, and transform tabular data (DEVT Framework). It supports a great deal of data schemes and formats, as well as provides popular platforms integrations. The framework is powered by the lightweight yet comprehensive [Frictionless Standards](https://specs.frictionlessdata.io/).\n"
},
{
"change_type": "MODIFY",
"old_path": "livemark.yaml",
"new_path": "livemark.yaml",
"diff": "@@ -28,7 +28,7 @@ links:\n- name: Frictionless\npath: https://frictionlessdata.io\n- name: Support\n- path: https://discord.com/channels/695635777199145130/695635777199145133\n+ path: https://join.slack.com/t/frictionlessdata/shared_invite/zt-17kpbffnm-tRfDW_wJgOw8tJVLvZTrBg\npages:\nitems:\n- path: index\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Replace old discord links for slack links (#1240)
* Update README.md
Removes deprecated discord link for slack.
* Update livemark.yaml
Removes deprecated discord link for slack |
234,895 | 20.10.2022 16:39:17 | -7,200 | b1d1d1207e61989e03bf4086c7fd698261b3c013 | Fix field_update with formula argument | [
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/field/field_update.py",
"new_path": "frictionless/steps/field/field_update.py",
"diff": "@@ -36,14 +36,16 @@ class field_update(Step):\ndef transform_resource(self, resource):\nfunction = self.function\n+ pass_row = False\ntable = resource.to_petl()\ndescriptor = deepcopy(self.descriptor) or {}\nnew_name = descriptor.get(\"name\")\nresource.schema.update_field(self.name, descriptor)\nif self.formula:\nfunction = lambda _, row: simpleeval.simple_eval(self.formula, names=row)\n+ pass_row = True\nif function:\n- resource.data = table.convert(self.name, function) # type: ignore\n+ resource.data = table.convert(self.name, function, pass_row=pass_row) # type: ignore\nelif self.value:\nresource.data = table.update(self.name, self.value) # type: ignore\nelif new_name:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/steps/field/test_field_update.py",
"new_path": "tests/steps/field/test_field_update.py",
"diff": "@@ -9,6 +9,7 @@ def test_step_field_update():\npipeline = Pipeline(\nsteps=[\nsteps.field_update(name=\"id\", function=str, descriptor={\"type\": \"string\"}),\n+ steps.field_update(name=\"population\", formula=\"int(population)*2\"),\n],\n)\ntarget = source.transform(pipeline)\n@@ -20,9 +21,9 @@ def test_step_field_update():\n]\n}\nassert target.read_rows() == [\n- {\"id\": \"1\", \"name\": \"germany\", \"population\": 83},\n- {\"id\": \"2\", \"name\": \"france\", \"population\": 66},\n- {\"id\": \"3\", \"name\": \"spain\", \"population\": 47},\n+ {\"id\": \"1\", \"name\": \"germany\", \"population\": 83 * 2},\n+ {\"id\": \"2\", \"name\": \"france\", \"population\": 66 * 2},\n+ {\"id\": \"3\", \"name\": \"spain\", \"population\": 47 * 2},\n]\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fix field_update with formula argument (#1247)
Co-authored-by: Shashi Gharti <[email protected]> |
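Condensed from the updated test: because the converter now receives the row (`pass_row=True`), a formula can reference the fields of the row it is updating, including the field being changed.

```python
# Double the population column with a formula that reads its own field.
from frictionless import Pipeline, Resource, steps

pipeline = Pipeline(steps=[
    steps.field_update(name="population", formula="int(population)*2"),
])
target = Resource("data/transform.csv").transform(pipeline)
print(target.read_rows())  # population values doubled
```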
234,915 | 21.10.2022 18:39:00 | -19,620 | a694f90b081bb7d473fb49a0d11fe45ae08f43d1 | Fixes reading parquet from remote resource
* Fixes reading parquet from remote resource
It converts the remote path to an _io.BytesIO object
Reference taken from
* creates handle only for remote files
* Restored resource file | [
{
"change_type": "MODIFY",
"old_path": "frictionless/formats/parquet/parser.py",
"new_path": "frictionless/formats/parquet/parser.py",
"diff": "from __future__ import annotations\n+\n+import os\nfrom ...platform import platform\nfrom ...resource import Resource\nfrom .control import ParquetControl\n@@ -25,7 +27,13 @@ class ParquetParser(Parser):\ndef read_cell_stream_create(self):\ncontrol = ParquetControl.from_dialect(self.resource.dialect)\n- file = platform.fastparquet.ParquetFile(self.resource.normpath)\n+ handle = self.resource.normpath\n+ if not os.path.isfile(self.resource.normpath):\n+ handles = platform.pandas.io.common.get_handle(\n+ self.resource.normpath, \"rb\", is_text=False\n+ )\n+ handle = handles.handle\n+ file = platform.fastparquet.ParquetFile(handle)\nfor group, df in enumerate(file.iter_row_groups(**control.to_python()), start=1):\nwith Resource(data=df, format=\"pandas\") as resource:\nfor line, cells in enumerate(resource.cell_stream, start=1):\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixes reading parquet from remote resource (#1260)
* Fixes reading parquet from remote resource
It converts the remote path to _io.BytesIO object
Reference taken from https://github.com/pandas-dev/pandas/blob/main/pandas/io/parquet.py#L322
* creates handle only for remote files
* Restored resource file |
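A sketch of what the fix enables — reading a parquet resource over HTTP, where the remote bytes are now buffered before being handed to fastparquet. The URL here is hypothetical; scheme and format are inferred from it.

```python
# Remote parquet files can be opened like any other resource.
from frictionless import Resource

with Resource("https://example.com/table.parquet") as resource:
    print(resource.read_rows())
```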
234,915 | 27.10.2022 12:37:36 | -19,620 | 693ef882e956a9c2d6d837496106fb97073150e3 | Fixed bug in table-write step while using foreign key | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/package-with-pipeline-multiple-tables-to-write.json",
"diff": "+{\n+ \"name\": \"petites_villes_de_demain\",\n+ \"resources\": [\n+ {\n+ \"name\": \"commune\",\n+ \"type\": \"table\",\n+ \"path\": \"sqlite:///database.db\",\n+ \"format\": \"sql\",\n+ \"mediatype\": \"application/sql\",\n+ \"dialect\": {\n+ \"sql\": {\n+ \"table\": \"commune\"\n+ }\n+ },\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"insee_com\",\n+ \"type\": \"string\",\n+ \"constraints\": {\n+ \"required\": true\n+ }\n+ }\n+ ],\n+ \"primaryKey\": [\n+ \"insee_com\"\n+ ]\n+ }\n+ },\n+ {\n+ \"name\": \"diffusion-zonages-zrr-cog2021\",\n+ \"type\": \"table\",\n+ \"path\": \"https://github.com/fdtester/temp-files/blob/main/diffusion-zonages-zrr-cog2021.xls?raw=true\",\n+ \"scheme\": \"https\",\n+ \"format\": \"xls\",\n+ \"encoding\": \"utf-8\",\n+ \"mediatype\": \"application/vnd.ms-excel\",\n+ \"dialect\": {\n+ \"headerRows\": [\n+ 6\n+ ]\n+ },\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"CODGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"LIBGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZRR_SIMP\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZONAGE_ZRR\",\n+ \"type\": \"string\"\n+ }\n+ ],\n+ \"foreignKeys\": [\n+ {\n+ \"fields\": [\n+ \"CODGEO\"\n+ ],\n+ \"reference\": {\n+ \"resource\": \"commune\",\n+ \"fields\": [\n+ \"insee_com\"\n+ ]\n+ }\n+ }\n+ ]\n+ },\n+ \"pipeline\": {\n+ \"steps\": [\n+ {\n+ \"type\": \"table-write\",\n+ \"path\": \"sqlite:///database.db\",\n+ \"dialect\": {\n+ \"sql\": {\n+ \"table\": \"zrr\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ },\n+ {\n+ \"name\": \"diffusion-zonages-zrr-cog2021-1\",\n+ \"type\": \"table\",\n+ \"path\": \"https://github.com/fdtester/temp-files/blob/main/diffusion-zonages-zrr-cog2021.xls?raw=true\",\n+ \"scheme\": \"https\",\n+ \"format\": \"xls\",\n+ \"encoding\": \"utf-8\",\n+ \"mediatype\": \"application/vnd.ms-excel\",\n+ \"dialect\": {\n+ \"headerRows\": [\n+ 6\n+ ]\n+ },\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"CODGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"LIBGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZRR_SIMP\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZONAGE_ZRR\",\n+ \"type\": \"string\"\n+ }\n+ ],\n+ \"foreignKeys\": [\n+ {\n+ \"fields\": [\n+ \"CODGEO\"\n+ ],\n+ \"reference\": {\n+ \"resource\": \"commune\",\n+ \"fields\": [\n+ \"insee_com\"\n+ ]\n+ }\n+ }\n+ ]\n+ },\n+ \"pipeline\": {\n+ \"steps\": [\n+ {\n+ \"type\": \"table-write\",\n+ \"path\": \"sqlite:///database.db\",\n+ \"dialect\": {\n+ \"sql\": {\n+ \"table\": \"zrr1\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ }\n+ ]\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "data/package-with-pipeline.json",
"diff": "+{\n+ \"name\": \"petites_villes_de_demain\",\n+ \"resources\": [\n+ {\n+ \"name\": \"commune\",\n+ \"type\": \"table\",\n+ \"path\": \"sqlite:///database.db\",\n+ \"format\": \"sql\",\n+ \"mediatype\": \"application/sql\",\n+ \"dialect\": {\n+ \"sql\": {\n+ \"table\": \"commune\"\n+ }\n+ },\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"insee_com\",\n+ \"type\": \"string\",\n+ \"constraints\": {\n+ \"required\": true\n+ }\n+ }\n+ ],\n+ \"primaryKey\": [\n+ \"insee_com\"\n+ ]\n+ }\n+ },\n+ {\n+ \"name\": \"diffusion-zonages-zrr-cog2021\",\n+ \"type\": \"table\",\n+ \"path\": \"https://github.com/fdtester/temp-files/blob/main/diffusion-zonages-zrr-cog2021.xls?raw=true\",\n+ \"scheme\": \"https\",\n+ \"format\": \"xls\",\n+ \"encoding\": \"utf-8\",\n+ \"mediatype\": \"application/vnd.ms-excel\",\n+ \"dialect\": {\n+ \"headerRows\": [\n+ 6\n+ ]\n+ },\n+ \"schema\": {\n+ \"fields\": [\n+ {\n+ \"name\": \"CODGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"LIBGEO\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZRR_SIMP\",\n+ \"type\": \"string\"\n+ },\n+ {\n+ \"name\": \"ZONAGE_ZRR\",\n+ \"type\": \"string\"\n+ }\n+ ],\n+ \"foreignKeys\": [\n+ {\n+ \"fields\": [\n+ \"CODGEO\"\n+ ],\n+ \"reference\": {\n+ \"resource\": \"commune\",\n+ \"fields\": [\n+ \"insee_com\"\n+ ]\n+ }\n+ }\n+ ]\n+ },\n+ \"pipeline\": {\n+ \"steps\": [\n+ {\n+ \"type\": \"table-write\",\n+ \"path\": \"sqlite:///database.db\",\n+ \"dialect\": {\n+ \"sql\": {\n+ \"table\": \"zrr\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ }\n+ ]\n+}\n\\ No newline at end of file\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/formats/sql/storage.py",
"new_path": "frictionless/formats/sql/storage.py",
"diff": "@@ -178,16 +178,22 @@ class SqlStorage(Storage):\n# Write\ndef write_resource(self, resource, *, force=False):\n- package = Package(resources=[resource])\n- self.write_package(package, force=force)\n+ package = resource.package or Package(resources=[resource])\n+ ignore_resources = []\n+ for resource in package.resources:\n+ if resource.format == \"sql\" and not resource.data:\n+ ignore_resources.append(resource.name)\n+ self.write_package(package, ignore_resources, force=force)\n- def write_package(self, package, force=False):\n+ def write_package(self, package, ignore_resources=None, force=False):\nexistent_names = list(self)\n# Check existent\ndelete_names = []\nfor resource in package.resources:\nif resource.name in existent_names:\n+ if ignore_resources and resource.name in ignore_resources:\n+ continue\nif not force:\nnote = f'Resource \"{resource.name}\" already exists'\nraise FrictionlessException(note)\n@@ -200,6 +206,8 @@ class SqlStorage(Storage):\nsql_tables = []\nself.delete_package(delete_names)\nfor resource in package.resources:\n+ if ignore_resources and resource.name in ignore_resources:\n+ continue\nif not resource.has_schema:\nresource.infer()\nsql_table = self.__write_convert_schema(resource)\n@@ -209,6 +217,8 @@ class SqlStorage(Storage):\n# Write data\nexistent_names = list(self)\nfor name in existent_names:\n+ if ignore_resources and name in ignore_resources:\n+ continue\nif package.has_resource(name):\nself.__write_convert_data(package.get_resource(name))\n"
},
{
"change_type": "MODIFY",
"old_path": "frictionless/steps/table/table_write.py",
"new_path": "frictionless/steps/table/table_write.py",
"diff": "@@ -2,6 +2,7 @@ from __future__ import annotations\nimport attrs\nfrom ...pipeline import Step\nfrom ...resource import Resource\n+from ...dialect import Dialect\[email protected](kw_only=True)\n@@ -20,7 +21,10 @@ class table_write(Step):\n# Transform\ndef transform_resource(self, resource):\n- target = Resource(path=self.path, type=\"table\")\n+ dialect = None\n+ if \"dialect\" in self.custom:\n+ dialect = Dialect.from_descriptor(self.custom[\"dialect\"])\n+ target = Resource(path=self.path, type=\"table\", dialect=dialect)\nresource.write(target)\n# Metadata\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/resource/transform/test_general.py",
"new_path": "tests/resource/transform/test_general.py",
"diff": "from frictionless import Resource, Pipeline, steps\n+from frictionless import Package\n# General\n@@ -48,3 +49,87 @@ def test_resource_transform_cell_set():\n{\"id\": 2, \"name\": \"france\", \"population\": 100},\n{\"id\": 3, \"name\": \"spain\", \"population\": 100},\n]\n+\n+\n+def test_resource_transform_table_creation_with_foreign_key(sqlite_url):\n+ # write table\n+ resource = Resource(\n+ {\n+ \"name\": \"commune\",\n+ \"schema\": {\n+ \"fields\": [{\"name\": \"insee_com\", \"type\": \"string\"}],\n+ \"primaryKey\": [\"insee_com\"],\n+ },\n+ \"data\": [[\"insee_com\"], [\"01001\"], [\"01002\"]],\n+ }\n+ )\n+ source = Package(resources=[resource])\n+ source.to_sql(sqlite_url)\n+\n+ # write table using pipeline step\n+ package = Package(\"data/package-with-pipeline.json\")\n+ package.resources[0].path = sqlite_url\n+ package.resources[1].pipeline.steps[0].path = sqlite_url # type: ignore\n+ for resource in package.resources:\n+ if resource.pipeline:\n+ resource.transform()\n+\n+ # read tables\n+ target = Package.from_sql(sqlite_url)\n+ assert target.get_resource(\"zrr\").schema.to_descriptor() == {\n+ \"fields\": [\n+ {\"name\": \"CODGEO\", \"type\": \"string\"},\n+ {\"name\": \"LIBGEO\", \"type\": \"string\"},\n+ {\"name\": \"ZRR_SIMP\", \"type\": \"string\"},\n+ {\"name\": \"ZONAGE_ZRR\", \"type\": \"string\"},\n+ ],\n+ \"foreignKeys\": [\n+ {\n+ \"fields\": [\"CODGEO\"],\n+ \"reference\": {\"resource\": \"commune\", \"fields\": [\"insee_com\"]},\n+ }\n+ ],\n+ }\n+\n+\n+def test_resource_transform_multiple_table_creation_with_foreign_key(sqlite_url):\n+ # write table\n+ resource = Resource(\n+ {\n+ \"name\": \"commune\",\n+ \"schema\": {\n+ \"fields\": [{\"name\": \"insee_com\", \"type\": \"string\"}],\n+ \"primaryKey\": [\"insee_com\"],\n+ },\n+ \"data\": [[\"insee_com\"], [\"01001\"], [\"01002\"]],\n+ }\n+ )\n+ source = Package(resources=[resource])\n+ source.to_sql(sqlite_url)\n+\n+ # write table using pipeline step\n+ package = Package(\"data/package-with-pipeline-multiple-tables-to-write.json\")\n+ package.resources[0].path = sqlite_url\n+ package.resources[1].pipeline.steps[0].path = sqlite_url # type: ignore\n+ package.resources[2].pipeline.steps[0].path = sqlite_url # type: ignore\n+\n+ for resource in package.resources:\n+ if resource.pipeline:\n+ resource.transform()\n+\n+ # read tables\n+ target = Package.from_sql(sqlite_url)\n+ assert target.get_resource(\"zrr\").schema.to_descriptor() == {\n+ \"fields\": [\n+ {\"name\": \"CODGEO\", \"type\": \"string\"},\n+ {\"name\": \"LIBGEO\", \"type\": \"string\"},\n+ {\"name\": \"ZRR_SIMP\", \"type\": \"string\"},\n+ {\"name\": \"ZONAGE_ZRR\", \"type\": \"string\"},\n+ ],\n+ \"foreignKeys\": [\n+ {\n+ \"fields\": [\"CODGEO\"],\n+ \"reference\": {\"resource\": \"commune\", \"fields\": [\"insee_com\"]},\n+ }\n+ ],\n+ }\n"
}
]
| Python | MIT License | frictionlessdata/frictionless-py | Fixed bug in table-write step while using foreign key (#1265) |
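
A minimal usage sketch mirroring the new test in the commit above: each resource that declares an inline pipeline is transformed, and the table-write step now picks up a step-level "dialect" (here the target SQL table name), while the SqlStorage change skips re-creating SQL-backed resources that already exist so the foreign-key reference to "commune" resolves. The descriptor path comes from the fixture added in this commit, and the SQLite database it points at is assumed to already contain the referenced "commune" table:

```python
from frictionless import Package

# Fixture added in this commit; its table-write step carries a "dialect"
# with the target SQL table name, e.g. {"sql": {"table": "zrr"}}
package = Package("data/package-with-pipeline.json")

# Transform only the resources that declare an inline pipeline, as the
# new test does; the SQL-backed "commune" resource is left untouched
for resource in package.resources:
    if resource.pipeline:
        resource.transform()
```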