repo (string, 856 classes) | pull_number (int64, 3-127k) | instance_id (string, length 12-58) | issue_numbers (sequence, length 1-5) | base_commit (string, length 40) | patch (string, length 67-1.54M) | test_patch (string, length 0-107M) | problem_statement (string, length 3-307k) | hints_text (string, length 0-908k) | created_at (timestamp[s])
---|---|---|---|---|---|---|---|---|---|
Netflix/lemur | 245 | Netflix__lemur-245 | [
"243"
] | 35f9f59c573bec8beeacd28b092de3306b6d918b | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -9,7 +9,7 @@
import base64
import requests
-from flask import g, Blueprint, current_app
+from flask import Blueprint, current_app
from flask.ext.restful import reqparse, Resource, Api
from flask.ext.principal import Identity, identity_changed
@@ -100,9 +100,6 @@ def post(self):
return dict(message='The supplied credentials are invalid'), 401
- def get(self):
- return {'username': g.current_user.username, 'roles': [r.name for r in g.current_user.roles]}
-
class Ping(Resource):
"""
| Internal Server Error hitting auth/login API
Hitting the `/auth/login` API with a GET request returns an HTTP 500 error. The resource needs to be authenticated.
| After looking at this I believe that route can just be removed. It is not called, /me is used instead.
See: https://github.com/Netflix/lemur/blob/master/lemur/users/views.py#L356
| 2016-02-16T22:59:36 |
|
Netflix/lemur | 264 | Netflix__lemur-264 | [
"246"
] | dfaf45344ccc07753e0633d84bf8303373587cdf | diff --git a/lemur/certificates/service.py b/lemur/certificates/service.py
--- a/lemur/certificates/service.py
+++ b/lemur/certificates/service.py
@@ -144,7 +144,7 @@ def mint(issuer_options):
if not issuer_options.get('csr'):
csr, private_key = create_csr(issuer_options)
else:
- csr = issuer_options.get('csr')
+ csr = str(issuer_options.get('csr'))
private_key = None
issuer_options['creator'] = g.user.email
diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -232,7 +232,7 @@ def post(self):
"owner": "[email protected]",
"description": "test",
"selectedAuthority": "timetest2",
- "csr",
+ "csr": "----BEGIN CERTIFICATE REQUEST-----...",
"authority": {
"body": "-----BEGIN...",
"name": "timetest2",
| Trouble with submitting a CSR to the verisign plugin
There is an issue where accepting a CSR from the UI, as opposed to having Lemur generate the CSR, causes Unicode errors:
```
2016-02-19 21:19:04,405 ERROR: 45 [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/certificates/views.py", line 387, in post
return service.create(**args)
File "/apps/lemur/lemur/certificates/service.py", line 247, in create
cert, private_key, cert_chain = mint(kwargs)
File "/apps/lemur/lemur/certificates/service.py", line 151, in mint
cert_body, cert_chain = issuer.create_certificate(csr, issuer_options)
File "/apps/lemur/lemur/plugins/lemur_verisign/plugin.py", line 156, in create_certificate
response = self.session.post(url, data=data)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 511, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 454, in request
prep = self.prepare_request(req)
File "/apps/python/local/lib/python2.7/site-packages/requests/sessions.py", line 388, in prepare_request
hooks=merge_hooks(request.hooks, self.hooks),
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 296, in prepare
self.prepare_body(data, files, json)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 450, in prepare_body
body = self._encode_params(data)
File "/apps/python/local/lib/python2.7/site-packages/requests/models.py", line 97, in _encode_params
return urlencode(result, doseq=True)
File "/usr/lib/python2.7/urllib.py", line 1338, in urlencode
v = quote_plus(v)
File "/usr/lib/python2.7/urllib.py", line 1293, in quote_plus
s = quote(s, safe + ' ')
File "/usr/lib/python2.7/urllib.py", line 1288, in quote
return ''.join(map(quoter, s))
KeyError: 45
```
Most likely the CSR contains characters that urllib is unable to quote.
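A minimal sketch of the coercion the patch above applies before the CSR reaches `urlencode`; `prepare_csr` is a hypothetical helper, and Python 2 semantics are assumed, per the traceback:

```python
def prepare_csr(issuer_options):
    """Coerce a UI-submitted CSR to a byte string so urllib's per-byte
    quoting table can encode it (a PEM CSR is plain ASCII)."""
    csr = issuer_options.get('csr')
    if csr is not None:
        csr = str(csr)  # mirrors the one-line fix in lemur/certificates/service.py
    return csr
```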
| 2016-04-01T16:19:54 |
||
Netflix/lemur | 265 | Netflix__lemur-265 | [
"257"
] | d0ec925ca36901548cca75ab08f376646573c106 | diff --git a/lemur/notifications/views.py b/lemur/notifications/views.py
--- a/lemur/notifications/views.py
+++ b/lemur/notifications/views.py
@@ -376,7 +376,7 @@ def put(self, notification_id):
:statuscode 200: no error
"""
self.reqparse.add_argument('label', type=str, location='json', required=True)
- self.reqparse.add_argument('plugin', type=dict, location='json', required=True)
+ self.reqparse.add_argument('notificationOptions', type=list, location='json')
self.reqparse.add_argument('active', type=bool, location='json')
self.reqparse.add_argument('certificates', type=list, default=[], location='json')
self.reqparse.add_argument('description', type=str, location='json')
@@ -385,7 +385,7 @@ def put(self, notification_id):
return service.update(
notification_id,
args['label'],
- args['plugin']['pluginOptions'],
+ args['notificationOptions'],
args['description'],
args['active'],
args['certificates']
| Disabling and then re-enabling notifications causes notifications to go unsent
There is a bug in the way that a notification is 're-enabled' such that the interval value (number of days) becomes unset when the notification is marked as active again.
| 2016-04-01T16:51:22 |
||
Netflix/lemur | 267 | Netflix__lemur-267 | [
"261"
] | d0ec925ca36901548cca75ab08f376646573c106 | diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -103,6 +103,10 @@ def create(kwargs):
# the owning dl or role should have this authority associated with it
owner_role = role_service.get_by_name(kwargs['ownerEmail'])
+
+ if not owner_role:
+ owner_role = role_service.create(kwargs['ownerEmail'])
+
owner_role.authority = authority
g.current_user.authorities.append(authority)
| Create new roles for unknown owners
Currently, when you create an authority with an unknown owner, we get an error because we assumed that the owner is creating the authority.
This is not always the case, as sometimes teams will create authorities on behalf of other teams. We should just go ahead and create an owner_role if one does not exist.
```
2016-03-31 16:21:39,507 ERROR: 'NoneType' object has no attribute 'authority' [in /apps/lemur/lemur/common/utils.py:60]
Traceback (most recent call last):
File "/apps/lemur/lemur/common/utils.py", line 46, in wrapper
resp = f(*args, **kwargs)
File "/apps/lemur/lemur/authorities/views.py", line 201, in post
return service.create(args)
File "/apps/lemur/lemur/authorities/service.py", line 106, in create
owner_role.authority = authority
AttributeError: 'NoneType' object has no attribute 'authority'
```
| 2016-04-01T16:59:06 |
||
Netflix/lemur | 271 | Netflix__lemur-271 | [
"195"
] | 675d10c8a660c61c0049bf4e94a3860a3081ff09 | diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -357,6 +357,7 @@ def post(self):
self.reqparse.add_argument('replacements', type=list, default=[], location='json')
self.reqparse.add_argument('validityStart', type=str, location='json') # TODO validate
self.reqparse.add_argument('validityEnd', type=str, location='json') # TODO validate
+ self.reqparse.add_argument('validityYears', type=int, location='json') # TODO validate
self.reqparse.add_argument('authority', type=valid_authority, location='json', required=True)
self.reqparse.add_argument('description', type=str, location='json')
self.reqparse.add_argument('country', type=str, location='json', required=True)
diff --git a/lemur/plugins/lemur_verisign/plugin.py b/lemur/plugins/lemur_verisign/plugin.py
--- a/lemur/plugins/lemur_verisign/plugin.py
+++ b/lemur/plugins/lemur_verisign/plugin.py
@@ -82,6 +82,12 @@ def process_options(options):
data['specificEndDate'] = str(end_date)
data['validityPeriod'] = period
+ elif options.get('validityYears'):
+ if options['validityYears'] in [1, 2]:
+ data['validityPeriod'] = str(options['validityYears']) + 'Y'
+ else:
+ raise Exception("Verisign issued certificates cannot exceed two years in validity")
+
return data
| Support validity period selection in years
Some CAs (DigiCert is one) only allow you to specify validity periods in integers (1, 2 or 3 years). There are no specific date selections allowed. Would be nice if there was a way in the GUI to quickly select one of these options. Implementation flexible.
| Verisign/Symantec does something similar, but they do allow specifying the end date. Does DigiCert disallow a given end date at all?
https://github.com/Netflix/lemur/blob/master/lemur/plugins/lemur_verisign/plugin.py#L100
So after reviewing the API, they do support a specific end date if you want to end it earlier than the standard 1, 2, or 3 years. Suppose this is more of a usability request. Right now we're having to select dates that fall into the 1/2 year bounds. Would be nice to be able to select an integer 1 or 2 or 3.
Got it. UI wise I think we could do a dropdown with 1, 2, 3 years and then an advanced box for users who want to specify an end date.
That would be perfect!
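A hedged example of a request payload using the new option (field names come from the patch above; the owner and authority values are hypothetical, and required certificate fields unrelated to the option are omitted):

```python
payload = {
    "owner": "owner@example.com",       # hypothetical owner
    "authority": {"name": "verisign"},  # hypothetical authority name
    "commonName": "test.example.net",
    "validityYears": 2,  # the Verisign plugin accepts 1 or 2; anything else raises
}
```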
| 2016-04-01T21:28:16 |
|
Netflix/lemur | 290 | Netflix__lemur-290 | [
"289"
] | b463fcf61bd815eac3611790621cf1652ff70e0a | diff --git a/lemur/plugins/lemur_java/plugin.py b/lemur/plugins/lemur_java/plugin.py
--- a/lemur/plugins/lemur_java/plugin.py
+++ b/lemur/plugins/lemur_java/plugin.py
@@ -10,10 +10,11 @@
from flask import current_app
+from cryptography.fernet import Fernet
+
from lemur.utils import mktempfile, mktemppath
from lemur.plugins.bases import ExportPlugin
from lemur.plugins import lemur_java as java
-from lemur.common.utils import get_psuedo_random_string
def run_process(command):
@@ -29,6 +30,7 @@ def run_process(command):
if p.returncode != 0:
current_app.logger.debug(" ".join(command))
current_app.logger.error(stderr)
+ current_app.logger.error(stdout)
raise Exception(stderr)
@@ -85,39 +87,36 @@ def create_truststore(cert, chain, jks_tmp, alias, passphrase):
])
-def create_keystore(cert, jks_tmp, key, alias, passphrase):
- with mktempfile() as key_tmp:
- with open(key_tmp, 'w') as f:
- f.write(key)
-
- # Create PKCS12 keystore from private key and public certificate
- with mktempfile() as cert_tmp:
- with open(cert_tmp, 'w') as f:
- f.write(cert)
-
- with mktempfile() as p12_tmp:
- run_process([
- "openssl",
- "pkcs12",
- "-export",
- "-name", alias,
- "-in", cert_tmp,
- "-inkey", key_tmp,
- "-out", p12_tmp,
- "-password", "pass:{}".format(passphrase)
- ])
-
- # Convert PKCS12 keystore into a JKS keystore
- run_process([
- "keytool",
- "-importkeystore",
- "-destkeystore", jks_tmp,
- "-srckeystore", p12_tmp,
- "-srcstoretype", "PKCS12",
- "-alias", alias,
- "-srcstorepass", passphrase,
- "-deststorepass", passphrase
- ])
+def create_keystore(cert, chain, jks_tmp, key, alias, passphrase):
+ # Create PKCS12 keystore from private key and public certificate
+ with mktempfile() as cert_tmp:
+ with open(cert_tmp, 'w') as f:
+ f.writelines([key + "\n", cert + "\n", chain + "\n"])
+
+ with mktempfile() as p12_tmp:
+ run_process([
+ "openssl",
+ "pkcs12",
+ "-export",
+ "-nodes",
+ "-name", alias,
+ "-in", cert_tmp,
+ "-out", p12_tmp,
+ "-password", "pass:{}".format(passphrase)
+ ])
+
+ # Convert PKCS12 keystore into a JKS keystore
+ run_process([
+ "keytool",
+ "-importkeystore",
+ "-destkeystore", jks_tmp,
+ "-srckeystore", p12_tmp,
+ "-srcstoretype", "pkcs12",
+ "-deststoretype", "JKS",
+ "-alias", alias,
+ "-srcstorepass", passphrase,
+ "-deststorepass", passphrase
+ ])
class JavaTruststoreExportPlugin(ExportPlugin):
@@ -165,7 +164,7 @@ def export(self, body, chain, key, options, **kwargs):
if self.get_option('passphrase', options):
passphrase = self.get_option('passphrase', options)
else:
- passphrase = get_psuedo_random_string()
+ passphrase = Fernet.generate_key()
with mktemppath() as jks_tmp:
create_truststore(body, chain, jks_tmp, alias, passphrase)
@@ -215,7 +214,7 @@ def export(self, body, chain, key, options, **kwargs):
if self.get_option('passphrase', options):
passphrase = self.get_option('passphrase', options)
else:
- passphrase = get_psuedo_random_string()
+ passphrase = Fernet.generate_key()
if self.get_option('alias', options):
alias = self.get_option('alias', options)
@@ -226,8 +225,7 @@ def export(self, body, chain, key, options, **kwargs):
if not key:
raise Exception("Unable to export, no private key found.")
- create_truststore(body, chain, jks_tmp, alias, passphrase)
- create_keystore(body, jks_tmp, key, alias, passphrase)
+ create_keystore(body, chain, jks_tmp, key, alias, passphrase)
with open(jks_tmp, 'rb') as f:
raw = f.read()
| Java-export Plugin not including intermediates during JKS Keystore export.
The plugin fails to include intermediates during the export process.
| 2016-04-21T23:22:53 |
||
Netflix/lemur | 292 | Netflix__lemur-292 | [
"285"
] | 2c6d494c32d23fc19bf90d0b9aa0618c611c4d90 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -30,7 +30,7 @@
from lemur.plugins.lemur_aws import elb
-from lemur.sources.service import sync
+from lemur.sources.service import sync as source_sync
from lemur import create_app
@@ -189,7 +189,7 @@ def generate_settings():
@manager.option('-s', '--sources', dest='labels')
-def sync_sources(labels):
+def sync(labels):
"""
Attempts to run several methods Certificate discovery. This is
run on a periodic basis and updates the Lemur datastore with the
@@ -218,9 +218,9 @@ def sync_sources(labels):
labels = labels.split(",")
if labels[0] == 'all':
- sync()
+ source_sync()
else:
- sync(labels=labels)
+ source_sync(labels=labels)
sys.stdout.write(
"[+] Finished syncing sources. Run Time: {time}\n".format(
| Document periodic tasks or jobs
There are a few pieces of functionality that are run periodically that should be called out in the documentation. Short list:
- notify
- check_revoked
- sync_sources
| 2016-04-25T18:34:33 |
||
Netflix/lemur | 302 | Netflix__lemur-302 | [
"301"
] | f919b7360e79b406da97de9c4a24026631df8761 | diff --git a/lemur/plugins/lemur_openssl/plugin.py b/lemur/plugins/lemur_openssl/plugin.py
--- a/lemur/plugins/lemur_openssl/plugin.py
+++ b/lemur/plugins/lemur_openssl/plugin.py
@@ -33,11 +33,12 @@ def run_process(command):
raise Exception(stderr)
-def create_pkcs12(cert, p12_tmp, key, alias, passphrase):
+def create_pkcs12(cert, chain, p12_tmp, key, alias, passphrase):
"""
Creates a pkcs12 formated file.
:param cert:
- :param jks_tmp:
+ :param chain:
+ :param p12_tmp:
:param key:
:param alias:
:param passphrase:
@@ -49,7 +50,7 @@ def create_pkcs12(cert, p12_tmp, key, alias, passphrase):
# Create PKCS12 keystore from private key and public certificate
with mktempfile() as cert_tmp:
with open(cert_tmp, 'w') as f:
- f.write(cert)
+ f.writelines([cert + "\n", chain + "\n"])
run_process([
"openssl",
@@ -119,7 +120,7 @@ def export(self, body, chain, key, options, **kwargs):
with mktemppath() as output_tmp:
if type == 'PKCS12 (.p12)':
- create_pkcs12(body, output_tmp, key, alias, passphrase)
+ create_pkcs12(body, chain, output_tmp, key, alias, passphrase)
extension = "p12"
else:
raise Exception("Unable to export, unsupported type: {0}".format(type))
| Chain Certificate is not exporting
Hi Team,
While extracting the .p12-formatted file (which was exported from Lemur), we cannot find the chain in it. Could you please let us know if we need to perform any additional step to download the .p12 along with the chain?
Thanks,
Akash John
| Verified this as a bug. Will look into a fix.
| 2016-05-04T23:57:02 |
|
Netflix/lemur | 368 | Netflix__lemur-368 | [
"367"
] | 5193342b3ad3dbd87110da3300cddb65000daa78 | diff --git a/lemur/auth/permissions.py b/lemur/auth/permissions.py
--- a/lemur/auth/permissions.py
+++ b/lemur/auth/permissions.py
@@ -27,21 +27,9 @@ def __init__(self):
super(SensitiveDomainPermission, self).__init__(RoleNeed('admin'))
-class ViewKeyPermission(Permission):
- def __init__(self, certificate_id, owner):
- c_need = CertificateCreatorNeed(certificate_id)
- super(ViewKeyPermission, self).__init__(c_need, RoleNeed(owner), RoleNeed('admin'))
-
-
-class UpdateCertificatePermission(Permission):
- def __init__(self, certificate_id, owner):
- c_need = CertificateCreatorNeed(certificate_id)
- super(UpdateCertificatePermission, self).__init__(c_need, RoleNeed(owner), RoleNeed('admin'))
-
-
class CertificatePermission(Permission):
- def __init__(self, certificate_id, roles):
- needs = [RoleNeed('admin'), CertificateCreatorNeed(certificate_id)]
+ def __init__(self, certificate_id, owner, roles):
+ needs = [RoleNeed('admin'), CertificateCreatorNeed(certificate_id), RoleNeed(owner)]
for r in roles:
needs.append(CertificateOwnerNeed(str(r)))
diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -15,7 +15,7 @@
from lemur.common.utils import paginated_parser
from lemur.auth.service import AuthenticatedResource
-from lemur.auth.permissions import ViewKeyPermission, AuthorityPermission, CertificatePermission
+from lemur.auth.permissions import AuthorityPermission, CertificatePermission
from lemur.certificates import service
from lemur.certificates.schemas import certificate_input_schema, certificate_output_schema, \
@@ -399,9 +399,8 @@ def get(self, certificate_id):
if not cert:
return dict(message="Cannot find specified certificate"), 404
- role = role_service.get_by_name(cert.owner)
-
- permission = ViewKeyPermission(certificate_id, getattr(role, 'name', None))
+ owner_role = role_service.get_by_name(cert.owner)
+ permission = CertificatePermission(cert.id, owner_role, [x.name for x in cert.roles])
if permission.can():
response = make_response(jsonify(key=cert.private_key), 200)
@@ -581,7 +580,8 @@ def put(self, certificate_id, data=None):
"""
cert = service.get(certificate_id)
- permission = CertificatePermission(cert.id, [x.name for x in cert.roles])
+ owner_role = role_service.get_by_name(cert.owner)
+ permission = CertificatePermission(cert.id, owner_role, [x.name for x in cert.roles])
if permission.can():
return service.update(
@@ -864,7 +864,8 @@ def post(self, certificate_id, data=None):
"""
cert = service.get(certificate_id)
- permission = CertificatePermission(cert.id, [x.name for x in cert.roles])
+ owner_role = role_service.get_by_name(cert.owner)
+ permission = CertificatePermission(cert.id, owner_role, [x.name for x in cert.roles])
options = data['plugin']['plugin_options']
plugin = data['plugin']['plugin_object']
| Roles associated with a certificate should be allowed to view key material.
Any role associated with a given certificate should be allowed to view its key material as an owner.
| 2016-06-23T20:30:25 |
||
Netflix/lemur | 387 | Netflix__lemur-387 | [
"366"
] | 300e2d0b7d295a102ae3e1fbac6d614de26faede | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -864,7 +864,7 @@ def run(self, window):
def main():
manager.add_command("start", LemurServer())
- manager.add_command("runserver", Server(host='127.0.0.1'))
+ manager.add_command("runserver", Server(host='127.0.0.1', threaded=True))
manager.add_command("clean", Clean())
manager.add_command("show_urls", ShowUrls())
manager.add_command("db", MigrateCommand)
| Lemur should re-direct you to 'next' page after login.
Lemur should do the right thing and re-direct back to the correct page after a login prompt.
| 2016-07-04T20:03:32 |
||
Netflix/lemur | 455 | Netflix__lemur-455 | [
"432"
] | c367e4f73ff5ccb47611595dee2ac81a3bc5e022 | diff --git a/lemur/certificates/models.py b/lemur/certificates/models.py
--- a/lemur/certificates/models.py
+++ b/lemur/certificates/models.py
@@ -27,6 +27,7 @@
def get_or_increase_name(name):
+ name = '-'.join(name.strip().split(' '))
count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()
if count >= 1:
| A custom cert name with spaces causes AWS Upload failures
Creating a cert with a custom name that has spaces, such as: `My Certificate` will not properly get uploaded to AWS.
-- Potential Fixes:
1. Prevent spaces in custom names
2. Allow custom cert names to be editable
3. If spaces are allowed, the AWS uploader plugin needs to upload it in a way that can work properly.
| I don't like having the AWS plugin dictate core Lemur functionality. That being said, I don't particularly like having whitespace in custom names either. How do we feel about replacing any whitespace found in a certificate's custom name with a `-`? This would align it better with the generated names as well.
+1 to remove whitespace.
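A minimal sketch of the normalization the patch applies in `get_or_increase_name`, with a hypothetical custom name:

```python
name = 'My Certificate'
normalized = '-'.join(name.strip().split(' '))
assert normalized == 'My-Certificate'
```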
| 2016-10-15T11:56:18 |
|
Netflix/lemur | 457 | Netflix__lemur-457 | [
"412"
] | fb178866f4751ed8c5818f2b7c66364d1b466b71 | diff --git a/lemur/roles/views.py b/lemur/roles/views.py
--- a/lemur/roles/views.py
+++ b/lemur/roles/views.py
@@ -108,7 +108,9 @@ def post(self, data=None):
"description": "this is role3",
"username": null,
"password": null,
- "users": []
+ "users": [
+ {'id': 1}
+ ]
}
**Example response**:
diff --git a/lemur/schemas.py b/lemur/schemas.py
--- a/lemur/schemas.py
+++ b/lemur/schemas.py
@@ -24,23 +24,51 @@
from lemur.notifications.models import Notification
-def fetch_object(model, field, value):
- try:
- return model.query.filter(getattr(model, field) == value).one()
- except NoResultFound:
- raise ValidationError('Unable to find {model} with {field}: {data}'.format(model=model, field=field, data=value))
+def get_object_attribute(data, many=False):
+ if many:
+ ids = [d.get('id') for d in data]
+ names = [d.get('name') for d in data]
+
+ if None in ids:
+ if None in names:
+ raise ValidationError('Associated object require a name or id.')
+ else:
+ return 'name'
+ return 'id'
+ else:
+ if data.get('id'):
+ return 'id'
+ elif data.get('name'):
+ return 'name'
+ else:
+ raise ValidationError('Associated object require a name or id.')
-def fetch_objects(model, field, values):
- values = [v[field] for v in values]
- items = model.query.filter(getattr(model, field).in_(values)).all()
- found = [getattr(i, field) for i in items]
- diff = set(values).symmetric_difference(set(found))
+def fetch_objects(model, data, many=False):
+ attr = get_object_attribute(data, many=many)
- if diff:
- raise ValidationError('Unable to locate {model} with {field} {diff}'.format(model=model, field=field, diff=",".join([list(diff)])))
+ if many:
+ values = [v[attr] for v in data]
+ items = model.query.filter(getattr(model, attr).in_(values)).all()
+ found = [getattr(i, attr) for i in items]
+ diff = set(values).symmetric_difference(set(found))
- return items
+ if diff:
+ raise ValidationError('Unable to locate {model} with {attr} {diff}'.format(
+ model=model,
+ attr=attr,
+ diff=",".join(list(diff))))
+
+ return items
+
+ else:
+ try:
+ return model.query.filter(getattr(model, attr) == data[attr]).one()
+ except NoResultFound:
+ raise ValidationError('Unable to find {model} with {attr}: {data}'.format(
+ model=model,
+ attr=attr,
+ data=data[attr]))
class AssociatedAuthoritySchema(LemurInputSchema):
@@ -49,68 +77,52 @@ class AssociatedAuthoritySchema(LemurInputSchema):
@post_load
def get_object(self, data, many=False):
- if data.get('id'):
- return fetch_object(Authority, 'id', data['id'])
-
- elif data.get('name'):
- return fetch_object(Authority, 'name', data['name'])
+ return fetch_objects(Authority, data, many=many)
class AssociatedRoleSchema(LemurInputSchema):
- id = fields.Int(required=True)
+ id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
- if many:
- return fetch_objects(Role, 'id', data)
- else:
- return fetch_object(Role, 'id', data['id'])
+ return fetch_objects(Role, data, many=many)
class AssociatedDestinationSchema(LemurInputSchema):
- id = fields.Int(required=True)
+ id = fields.Int()
name = fields.String()
@post_load
def get_object(self, data, many=False):
- if many:
- return fetch_objects(Destination, 'id', data)
- else:
- return fetch_object(Destination, 'id', data['id'])
+ return fetch_objects(Destination, data, many=many)
class AssociatedNotificationSchema(LemurInputSchema):
- id = fields.Int(required=True)
+ id = fields.Int()
+ name = fields.String()
@post_load
def get_object(self, data, many=False):
- if many:
- return fetch_objects(Notification, 'id', data)
- else:
- return fetch_object(Notification, 'id', data['id'])
+ return fetch_objects(Notification, data, many=many)
class AssociatedCertificateSchema(LemurInputSchema):
- id = fields.Int(required=True)
+ id = fields.Int()
+ name = fields.String()
@post_load
def get_object(self, data, many=False):
- if many:
- return fetch_objects(Certificate, 'id', data)
- else:
- return fetch_object(Certificate, 'id', data['id'])
+ return fetch_objects(Certificate, data, many=many)
class AssociatedUserSchema(LemurInputSchema):
- id = fields.Int(required=True)
+ id = fields.Int()
+ name = fields.String()
@post_load
def get_object(self, data, many=False):
- if many:
- return fetch_objects(User, 'id', data)
- else:
- return fetch_object(User, 'id', data['id'])
+ return fetch_objects(User, data, many=many)
class PluginInputSchema(LemurInputSchema):
diff --git a/lemur/users/views.py b/lemur/users/views.py
--- a/lemur/users/views.py
+++ b/lemur/users/views.py
@@ -108,7 +108,9 @@ def post(self, data=None):
"username": "user3",
"email": "[email protected]",
"active": true,
- "roles": []
+ "roles": [
+ {'id': 1} - or - {'name': 'myRole'}
+ ]
}
**Example response**:
@@ -199,7 +201,9 @@ def put(self, user_id, data=None):
"username": "user1",
"email": "[email protected]",
"active": false,
- "roles": []
+ "roles": [
+ {'id': 1} - or - {'name': 'myRole'}
+ ]
}
**Example response**:
| diff --git a/lemur/tests/test_schemas.py b/lemur/tests/test_schemas.py
new file mode 100644
--- /dev/null
+++ b/lemur/tests/test_schemas.py
@@ -0,0 +1,58 @@
+import pytest
+from marshmallow.exceptions import ValidationError
+
+from lemur.tests.factories import RoleFactory
+
+
+def test_get_object_attribute():
+ from lemur.schemas import get_object_attribute
+
+ with pytest.raises(ValidationError):
+ get_object_attribute({})
+
+ with pytest.raises(ValidationError):
+ get_object_attribute([{}], many=True)
+
+ with pytest.raises(ValidationError):
+ get_object_attribute([{}, {'id': 1}], many=True)
+
+ with pytest.raises(ValidationError):
+ get_object_attribute([{}, {'name': 'test'}], many=True)
+
+ assert get_object_attribute({'name': 'test'}) == 'name'
+ assert get_object_attribute({'id': 1}) == 'id'
+ assert get_object_attribute([{'name': 'test'}], many=True) == 'name'
+ assert get_object_attribute([{'id': 1}], many=True) == 'id'
+
+
+def test_fetch_objects(session):
+ from lemur.roles.models import Role
+ from lemur.schemas import fetch_objects
+
+ role = RoleFactory()
+ role1 = RoleFactory()
+ session.commit()
+
+ data = {'id': role.id}
+ found_role = fetch_objects(Role, data)
+ assert found_role == role
+
+ data = {'name': role.name}
+ found_role = fetch_objects(Role, data)
+ assert found_role == role
+
+ data = [{'id': role.id}, {'id': role1.id}]
+ found_roles = fetch_objects(Role, data, many=True)
+ assert found_roles == [role, role1]
+
+ data = [{'name': role.name}, {'name': role1.name}]
+ found_roles = fetch_objects(Role, data, many=True)
+ assert found_roles == [role, role1]
+
+ with pytest.raises(ValidationError):
+ data = [{'name': 'blah'}, {'name': role1.name}]
+ fetch_objects(Role, data, many=True)
+
+ with pytest.raises(ValidationError):
+ data = {'name': 'nah'}
+ fetch_objects(Role, data)
| Error when trying to pass name of role rather than ID
Expectation is that I would be able to do something like this:
```
{
"active": true,
"username":"newuser"
"password":"newpass"
"email": "[email protected]",
"roles": [ "admin", "RANDOM_CA_owner" ]
}
```
But seeing the following error:
```
AttributeError: 'unicode' object has no attribute 'items'
```
| I will take a look at this. Currently roles are expected to be objects and only support ID lookups.
e.g.
```
"roles": [{"name": "admin", "id": 123}]
```
I could see how this could be annoying depending on your use case.
I will try to at least support:
```
"roles": [{"name": "admin"}]
```
Which would save the client a lookup. I will update the documentation regardless.
That would be great, thanks!
One other thing that the docs could use (are they public? if so happy to send a PR) would be an example of passing a role in:
```
POST /users HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
{
"username": "user3",
"email": "[email protected]",
"active": true,
"roles": []
}
```
Err, is the 'docs/' folder what gets sent to lemur.readthedocs.io?
Yep everything under /docs eventually makes its way to lemur.readthedocs.io.
| 2016-10-16T10:56:07 |
Netflix/lemur | 458 | Netflix__lemur-458 | [
"415"
] | d31c9b19ce311f1b717c29df50a75503980efe81 | diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py
--- a/lemur/certificates/schemas.py
+++ b/lemur/certificates/schemas.py
@@ -77,11 +77,25 @@ def ensure_dates(self, data):
class CertificateEditInputSchema(CertificateSchema):
notify = fields.Boolean()
+ owner = fields.String()
destinations = fields.Nested(AssociatedDestinationSchema, missing=[], many=True)
notifications = fields.Nested(AssociatedNotificationSchema, missing=[], many=True)
replacements = fields.Nested(AssociatedCertificateSchema, missing=[], many=True)
roles = fields.Nested(AssociatedRoleSchema, missing=[], many=True)
+ @post_load
+ def enforce_notifications(self, data):
+ """
+ Ensures that when an owner changes, default notifications are added for the new owner.
+ Old owner notifications are retained unless explicitly removed.
+ :param data:
+ :return:
+ """
+ if data['owner']:
+ notification_name = "DEFAULT_{0}".format(data['owner'].split('@')[0].upper())
+ data['notifications'] += notification_service.create_default_expiration_notifications(notification_name, [data['owner']])
+ return data
+
class CertificateNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
| diff --git a/lemur/tests/test_certificates.py b/lemur/tests/test_certificates.py
--- a/lemur/tests/test_certificates.py
+++ b/lemur/tests/test_certificates.py
@@ -9,6 +9,14 @@
INTERNAL_VALID_LONG_STR, INTERNAL_VALID_SAN_STR, PRIVATE_KEY_STR
+def test_certificate_edit_schema(session):
+ from lemur.certificates.schemas import CertificateEditInputSchema
+
+ input_data = {'owner': '[email protected]'}
+ data, errors = CertificateEditInputSchema().load(input_data)
+ assert len(data['notifications']) == 3
+
+
def test_authority_identifier_schema():
from lemur.schemas import AuthorityIdentifierSchema
input_data = {'useAuthorityCert': True}
| Owner change does not modify notifications.
When a certificate changes ownership, we should try to also update the notifications by removing the old owner's defaults and applying the new owner's defaults.
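A minimal worked example of the default-notification naming used in the patch above (the owner address is hypothetical):

```python
owner = 'newowner@example.com'
notification_name = "DEFAULT_{0}".format(owner.split('@')[0].upper())
assert notification_name == 'DEFAULT_NEWOWNER'
```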
| 2016-10-18T06:20:42 |
|
Netflix/lemur | 487 | Netflix__lemur-487 | [
"486"
] | f141ae78f39642d520a37290959fb6e62e01b84e | diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -146,6 +146,38 @@ def post(self, data=None):
Host: example.com
Accept: application/json, text/javascript
+ {
+ "owner": "[email protected]",
+ "commonName": "test.example.net",
+ "country": "US",
+ "extensions": {
+ "subAltNames": {
+ "names": [
+ {
+ "nameType": "DNSName",
+ "value": "*.test.example.net"
+ },
+ {
+ "nameType": "DNSName",
+ "value": "www.test.example.net"
+ }
+ ]
+ }
+ },
+ "replaces": [{
+ "id": 1
+ },
+ "validityEnd": "2026-01-01T08:00:00.000Z",
+ "authority": {
+ "name": "verisign"
+ },
+ "organization": "Netflix, Inc.",
+ "location": "Los Gatos",
+ "state": "California",
+ "validityStart": "2016-11-11T04:19:48.000Z",
+ "organizationalUnit": "Operations"
+ }
+
**Example response**:
@@ -193,7 +225,9 @@ def post(self, data=None):
"id": 1090,
"name": "*.test.example.net"
}],
- "replaces": [],
+ "replaces": [{
+ "id": 1
+ }],
"name": "WILDCARD.test.example.net-SymantecCorporation-20160603-20180112",
"roles": [{
"id": 464,
| Add example requests
Add example requests for certificate creation.
| 2016-11-11T20:54:08 |
||
Netflix/lemur | 596 | Netflix__lemur-596 | [
"588"
] | 71ddbb409ce3d49e9fb33bea297ea1d777d95539 | diff --git a/lemur/common/managers.py b/lemur/common/managers.py
--- a/lemur/common/managers.py
+++ b/lemur/common/managers.py
@@ -60,11 +60,14 @@ def all(self):
results.append(cls())
else:
results.append(cls)
+
except InvalidConfiguration as e:
current_app.logger.warning("Plugin '{0}' may not work correctly. {1}".format(class_name, e))
+
except Exception as e:
current_app.logger.exception("Unable to import {0}. Reason: {1}".format(cls_path, e))
continue
+
self.cache = results
return results
diff --git a/lemur/factory.py b/lemur/factory.py
--- a/lemur/factory.py
+++ b/lemur/factory.py
@@ -92,6 +92,7 @@ def configure_app(app, config=None):
"""
# respect the config first
if config and config != 'None':
+ app.config['CONFIG_PATH'] = config
app.config.from_object(from_file(config))
else:
try:
@@ -103,6 +104,9 @@ def configure_app(app, config=None):
else:
app.config.from_object(from_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default.conf.py')))
+ # we don't use this
+ app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
+
def configure_extensions(app):
"""
diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -119,7 +119,6 @@
# modify this if you are not using a local database
SQLALCHEMY_DATABASE_URI = 'postgresql://lemur:lemur@localhost:5432/lemur'
-
# AWS
#LEMUR_INSTANCE_PROFILE = 'Lemur'
@@ -372,11 +371,10 @@ def run(self, *args, **kwargs):
app = WSGIApplication()
- # run startup tasks on an app like object
- pre_app = create_app(kwargs.get('config'))
- validate_conf(pre_app, REQUIRED_VARIABLES)
+ # run startup tasks on a app like object
+ validate_conf(current_app, REQUIRED_VARIABLES)
- app.app_uri = 'lemur:create_app(config="{0}")'.format(kwargs.get('config'))
+ app.app_uri = 'lemur:create_app(config="{0}")'.format(current_app.config.get('CONFIG_PATH'))
return app.run()
| allow lemur init to use alternate config
When installing, we can define an alternate path for `lemur create_config` with the -c arg. Can we get `lemur init` to accept an alternate path? Or is it always expected to have configuration items (config, keys, etc.) under ~/.lemur/?
thanks
| That seems reasonable.
thanks
Looking into this, it seems we already support this. For any given command you should be able to pass '-c' and point to the configuration you want to use.
In your case something like:
lemur -c <path-to_config> init
Should use the configuration as passed.
Can you try that out and let me know how it goes?
So having already performed init, it appears to work. Though when I run `lemur -c /path/to/config start` it tries to use the ~/.lemur/ path.
Starting from scratch in a temp dir and fresh database all seems well until I try to start. Renaming the config found in ~/.lemur and trying to start I get the following
```
$ mv ~/.lemur/lemur.conf.py ~/.lemur/lemur.conf.py.bak
(lemur) jguarini-ld1:/tmp/lemur: lemur -c /tmp/lemur/config/lemur.conf.py start -b 0.0.0.0:8001
/tmp/lemur/lib/python3.5/site-packages/flask_sqlalchemy/__init__.py:800: UserWarning: SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True to suppress this warning.
  warnings.warn('SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True to suppress this warning.')
Plugin 'DigiCertIssuerPlugin' may not work correctly. Required variable 'DIGICERT_API_KEY' is not set in Lemur's conf.
Plugin 'DigiCertCISIssuerPlugin' may not work correctly. Required variable 'DIGICERT_CIS_API_KEY' is not set in Lemur's conf.
/tmp/lemur/lib/python3.5/site-packages/flask_sqlalchemy/__init__.py:800: UserWarning: SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True to suppress this warning.
  warnings.warn('SQLALCHEMY_TRACK_MODIFICATIONS adds significant overhead and will be disabled by default in the future. Set it to True to suppress this warning.')
Plugin 'DigiCertIssuerPlugin' may not work correctly. Required variable 'DIGICERT_API_KEY' is not set in Lemur's conf.
Plugin 'DigiCertCISIssuerPlugin' may not work correctly. Required variable 'DIGICERT_CIS_API_KEY' is not set in Lemur's conf.
Traceback (most recent call last):
  File "/tmp/lemur/bin/lemur", line 11, in <module>
    load_entry_point('lemur', 'console_scripts', 'lemur')()
  File "/tmp/lemur/lemur/manage.py", line 636, in main
    manager.run()
  File "/tmp/lemur/lib/python3.5/site-packages/flask_script/__init__.py", line 412, in run
    result = self.handle(sys.argv[0], sys.argv[1:])
  File "/tmp/lemur/lib/python3.5/site-packages/flask_script/__init__.py", line 383, in handle
    res = handle(*args, **config)
  File "/tmp/lemur/lib/python3.5/site-packages/flask_script/commands.py", line 216, in __call__
    return self.run(*args, **kwargs)
  File "/tmp/lemur/lemur/manage.py", line 377, in run
    validate_conf(pre_app, REQUIRED_VARIABLES)
  File "/tmp/lemur/lemur/common/utils.py", line 99, in validate_conf
    raise InvalidConfiguration("Required variable '{var}' is not set in Lemur's conf.".format(var=var))
lemur.exceptions.InvalidConfiguration: Required variable 'LEMUR_SECURITY_TEAM_EMAIL' is not set in Lemur's conf.
```
thanks for looking into this
Ugh, that is an ugly amount of output for a simple command. Let me see if I can clean that up. | 2016-12-13T23:52:50 |
|
Netflix/lemur | 621 | Netflix__lemur-621 | [
"594"
] | beba2ba0922aef35baee58a676b43107b2e71b96 | diff --git a/lemur/certificates/schemas.py b/lemur/certificates/schemas.py
--- a/lemur/certificates/schemas.py
+++ b/lemur/certificates/schemas.py
@@ -175,6 +175,7 @@ class CertificateOutputSchema(LemurOutputSchema):
authority = fields.Nested(AuthorityNestedOutputSchema)
roles = fields.Nested(RoleNestedOutputSchema, many=True)
endpoints = fields.Nested(EndpointNestedOutputSchema, many=True, missing=[])
+ replaced = fields.Nested(CertificateNestedOutputSchema, many=True)
class CertificateUploadInputSchema(CertificateCreationSchema):
diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -99,6 +99,7 @@ def get(self):
"name": "*.test.example.net"
}],
"replaces": [],
+ "replaced": [],
"name": "WILDCARD.test.example.net-SymantecCorporation-20160603-20180112",
"roles": [{
"id": 464,
@@ -520,6 +521,7 @@ def get(self, certificate_id):
"name": "*.test.example.net"
}],
"replaces": [],
+ "replaced": [],
"name": "WILDCARD.test.example.net-SymantecCorporation-20160603-20180112",
"roles": [{
"id": 464,
@@ -720,6 +722,7 @@ def get(self, notification_id):
"name": "*.test.example.net"
}],
"replaces": [],
+ "replaced": [],
"name": "WILDCARD.test.example.net-SymantecCorporation-20160603-20180112",
"roles": [{
"id": 464,
@@ -824,6 +827,7 @@ def get(self, certificate_id):
"name": "*.test.example.net"
}],
"replaces": [],
+ "replaced": [],
"name": "WILDCARD.test.example.net-SymantecCorporation-20160603-20180112",
"roles": [{
"id": 464,
| Add 'replacedBy' field in certificate response.
In addition to the 'replaces' field, we should also have a field of 'replacedBy' so we can easily determine if a certificate has been replaced and should no longer be used.
| yes, having a field that links/points to the most recent version of the cert would be very helpful. Say cert 2 is replaced by cert 4, which is then renewed/reissued as cert 55. The replaced-by field in cert 2 should either point to cert 55 or show that it was replaced by cert 4 and then 55, i.e. Replaced By: 4 -> 55
This would help to show the history of the cert along with what the current version is. I see that in the API output cert 55 contains the certs it replaces which is handy.
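A hedged sketch of how an API client could walk that history using the `replaced` field (as the field was ultimately named, per the note below); `get_certificate` is a hypothetical lookup:

```python
def newest_version(cert, get_certificate):
    """Follow 'replaced' links (e.g. 2 -> 4 -> 55) to the current certificate."""
    while cert.get('replaced'):
        cert = get_certificate(cert['replaced'][0]['id'])
    return cert
```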
Field has been named `replaced`. | 2016-12-20T22:20:37 |
|
Netflix/lemur | 622 | Netflix__lemur-622 | [
"577"
] | beba2ba0922aef35baee58a676b43107b2e71b96 | diff --git a/lemur/certificates/service.py b/lemur/certificates/service.py
--- a/lemur/certificates/service.py
+++ b/lemur/certificates/service.py
@@ -9,7 +9,7 @@
from datetime import timedelta
from flask import current_app
-from sqlalchemy import func, or_
+from sqlalchemy import func, or_, cast, Boolean
from cryptography import x509
from cryptography.hazmat.backends import default_backend
@@ -294,6 +294,8 @@ def render(args):
elif 'destination' in terms:
query = query.filter(Certificate.destinations.any(Destination.id == terms[1]))
+ elif 'notify' in filt:
+ query = query.filter(Certificate.notify == cast(terms[1], Boolean))
elif 'active' in filt:
query = query.filter(Certificate.active == terms[1])
elif 'cn' in terms:
| Fix notification filter in UI.
| 2016-12-20T22:20:45 |
||
Netflix/lemur | 706 | Netflix__lemur-706 | [
"702"
] | fc957b63ffd94813cac4cc699565b67fb6e58900 | diff --git a/lemur/certificates/service.py b/lemur/certificates/service.py
--- a/lemur/certificates/service.py
+++ b/lemur/certificates/service.py
@@ -9,7 +9,7 @@
from datetime import timedelta
from flask import current_app
-from sqlalchemy import func, or_, not_, cast, Boolean
+from sqlalchemy import func, or_, not_, cast, Boolean, Integer
from cryptography import x509
from cryptography.hazmat.backends import default_backend
@@ -299,6 +299,8 @@ def render(args):
Certificate.domains.any(Domain.name.ilike('%{0}%'.format(terms[1])))
)
)
+ elif 'id' in terms:
+ query = query.filter(Certificate.id == cast(terms[1], Integer))
else:
query = database.filter(query, Certificate, terms)
| Better expose the certificate ID in the Lemur UI
Our application integrates with Lemur using its API, and we use Lemur's primary key -- the certificate id -- throughout our system.
I'd like to be able to search and filter by certificate ID in the UI.
This is especially important for frequently-renewed certificates that may have tens or hundreds of entries in Lemur.
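A hedged example of such a query once the patch is in place; the `filter=field;value` syntax is assumed from how `render` splits its terms, and the host, token, and response envelope are placeholders:

```python
import requests

resp = requests.get(
    'https://lemur.example.com/api/1/certificates',
    params={'filter': 'id;1090'},                 # assumed filter syntax
    headers={'Authorization': 'Bearer <token>'},  # placeholder credentials
)
certificates = resp.json().get('items', [])       # envelope key assumed
```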
| 2017-03-03T23:02:03 |
||
Netflix/lemur | 707 | Netflix__lemur-707 | [
"684"
] | 310e1d4501678f4091ad37d2e7fdaf95c1df435c | diff --git a/lemur/migrations/versions/131ec6accff5_.py b/lemur/migrations/versions/131ec6accff5_.py
--- a/lemur/migrations/versions/131ec6accff5_.py
+++ b/lemur/migrations/versions/131ec6accff5_.py
@@ -16,7 +16,7 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
- op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=True))
+ op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=False, server_default=False))
op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))
# ### end Alembic commands ###
| Ensure rotation column == 'False' during migration.
Null values create problems during validation.
| 2017-03-03T23:14:59 |
||
Netflix/lemur | 713 | Netflix__lemur-713 | [
"712"
] | 038beafb5ec2d8b7a7a7a712d7bcc103e5250b78 | diff --git a/lemur/schemas.py b/lemur/schemas.py
--- a/lemur/schemas.py
+++ b/lemur/schemas.py
@@ -35,6 +35,9 @@ def validate_options(options):
interval = get_plugin_option('interval', options)
unit = get_plugin_option('unit', options)
+ if not interval and not unit:
+ return
+
if interval == 'month':
unit *= 30
| diff --git a/lemur/tests/test_authorities.py b/lemur/tests/test_authorities.py
--- a/lemur/tests/test_authorities.py
+++ b/lemur/tests/test_authorities.py
@@ -13,7 +13,7 @@ def test_authority_input_schema(client, role):
'owner': '[email protected]',
'description': 'An example authority.',
'commonName': 'AnExampleAuthority',
- 'pluginName': {'slug': 'verisign-issuer'},
+ 'plugin': {'slug': 'verisign-issuer', 'plugin_options': [{'name': 'test', 'value': 'blah'}]},
'type': 'root',
'signingAlgorithm': 'sha256WithRSA',
'keyType': 'RSA2048',
| Getting error while creating authority with lemur_cryptography plugin
I added a comment to the offending commit code here: https://github.com/Netflix/lemur/commit/d53f64890cb656765bc1c18f4b8442ee3a592f47
Upon creating an authority certificate with the lemur_cryptography plugin here, I get an error because unit is None and cannot be compared to 90. Is it reasonable to make a validation assumption that unit should be 0 if undefined?
I haven't quite been able to trace how this validation function is called, so I'm not entirely clear on what it is doing or why it's involved in the creation of an authority certificate, but not in the creation of a certificate signed by an authority.
Here's the error I get upon submitting an authority for creation.
```
2017-03-07 01:44:41,352 ERROR: Exception on /api/1/authorities [POST] [in /home/lemur/venv/lib/python3.4/site-packages/flask/app.py:1560]
Traceback (most recent call last):
File "/home/lemur/venv/lib/python3.4/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/lemur/venv/lib/python3.4/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/home/lemur/venv/lib/python3.4/site-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/home/lemur/venv/lib/python3.4/site-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/lemur/app/lemur/auth/service.py", line 110, in decorated_function
return f(*args, **kwargs)
File "/home/lemur/app/lemur/common/schema.py", line 150, in decorated_function
data, errors = input_schema.load(request_data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 578, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 658, in _do_load
index_errors=self.opts.index_errors,
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 265, in deserialize
output = self._deserialize(value, attr, data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 465, in _deserialize
data, errors = self.schema.load(value)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 578, in load
result, errors = self._do_load(data, many, partial=partial, postprocess=True)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/schema.py", line 658, in _do_load
index_errors=self.opts.index_errors,
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 295, in deserialize
index=(index if index_errors else None)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 68, in call_and_store
value = getter_func(data)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/marshalling.py", line 288, in <lambda>
data
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 266, in deserialize
self._validate(output)
File "/home/lemur/venv/lib/python3.4/site-packages/marshmallow/fields.py", line 196, in _validate
r = validator(value)
File "/home/lemur/app/lemur/schemas.py", line 44, in validate_options
if unit > 90:
TypeError: unorderable types: NoneType() > int()
```
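A minimal sketch of the guard that resolves this, matching the patch above; `get_plugin_option` is assumed to be in scope and to return None for absent options, and the error message is paraphrased:

```python
from marshmallow.exceptions import ValidationError

def validate_options(options):
    interval = get_plugin_option('interval', options)
    unit = get_plugin_option('unit', options)

    # authority plugins carry neither option, so bail out before
    # any arithmetic or comparison touches None
    if not interval and not unit:
        return

    if interval == 'month':
        unit *= 30

    if unit > 90:
        raise ValidationError('Notification interval cannot exceed 90 days.')
```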
| You're right, I hadn't considered the fact that plugin validation happens in quite a bit more places than just notifications. Not sure why none of the other tests caught this, I will create a PR and add a few more tests to cover it. | 2017-03-07T04:19:04 |
Netflix/lemur | 739 | Netflix__lemur-739 | [
"738"
] | dd39b9ebe851e9734470e23d29a22aee2a967f37 | diff --git a/lemur/authorities/schemas.py b/lemur/authorities/schemas.py
--- a/lemur/authorities/schemas.py
+++ b/lemur/authorities/schemas.py
@@ -70,7 +70,7 @@ def ensure_dates(self, data):
class AuthorityUpdateSchema(LemurInputSchema):
owner = fields.Email(required=True)
description = fields.String()
- active = fields.Boolean()
+ active = fields.Boolean(missing=True)
roles = fields.Nested(AssociatedRoleSchema(many=True))
diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -29,9 +29,7 @@ def update(authority_id, description=None, owner=None, active=None, roles=None):
if roles:
authority.roles = roles
- if active:
- authority.active = active
-
+ authority.active = active
authority.description = description
authority.owner = owner
return database.update(authority)
| [Web-UI] Active button for authority is not working
I have a list of authorities created in Lemur. I want to disable creation of new certificates using one of the authorities. Clicking the **active** button for that authority should make it inactive, which is not the case.
Even a PUT call to `http://example.com/api/1/authorities/5` with the active key's value set to false returns true in the response. Please find the request and response for the API in the file given below.
[active.txt](https://github.com/Netflix/lemur/files/878813/active.txt)
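A minimal sketch of the truthiness bug behind this, with names following the patch above:

```python
def update_buggy(authority, active=None):
    if active:  # False and None both skip this branch,
        authority.active = active  # so a deactivation request is dropped

def update_fixed(authority, active):
    authority.active = active  # assign unconditionally; the schema now
                               # supplies the default via Boolean(missing=True)
```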
| 2017-03-29T16:44:34 |
||
Netflix/lemur | 765 | Netflix__lemur-765 | [
"729"
] | 5fb675344554c5e6bd0903af3d118cec5c8cee22 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -222,7 +222,7 @@ def run(self, password):
sys.stderr.write("[!] Passwords do not match!\n")
sys.exit(1)
- user_service.create("lemur", password, 'lemur@nobody', True, None, [admin_role])
+ user_service.create("lemur", password, '[email protected]', True, None, [admin_role])
sys.stdout.write("[+] Created the user 'lemur' and granted it the 'admin' role!\n")
else:
| [Web UI] Invalid Certificates basic info
Tab 'Certificates' -> choose any certificate -> Tab 'BASIC INFO'
> Not Before: Invalid date
> Not After: Invalid date
> Signing Algorithm: [empty] -> should this read 'Signature Algorithm'?
| Can you view your certificate in the public certificate field? What kind of certificate are you issuing?
Hi @kevgliss , I am also facing the same issue. Basic Info for the created cert is not displayed properly. Following are the fields which are not displayed at all or getting displayed incorrectly:
1. Creator
2. Not Before --- Shows "invalid date"
3. Not After --- Shows "invalid date"
4. San --- A cross is displayed
5. Signing algorithm --- Nothing is displayed
6. Validity --- Unknown is displayed
7. Chain is also not displayed
Note: I am able to see the public certificate and private key fields with their respective values for that cert. Attaching an image with this comment. I can also see the "not_before" key's value in the response from the lemur server as "2017-03-23T12:00:00+00:00" (when checked using the developer tools in the Chrome browser).


Could you provide me the full json response for that certificate (minus the private key)?
Hi @kevgliss , I got the creator, not before, not after and signing algorithm fields to work. I suspect the problem was with the camel casing to underscore conversion. I changed in view.tpl.html file and got these fields to show the info. Please see the attached screenshot for the same.

I am still facing issues with the chain field though. And it seems that the problem here might be related to the chain not getting saved in the db (I might be wrong), as there is a "null" value returned for the chain field. Attaching the sample response with this comment.
[sampleResponse.txt](https://github.com/Netflix/lemur/files/874660/sampleResponse.txt)
Are you setting `CFSSL_INTERMEDIATE`? See:
http://lemur.readthedocs.io/en/latest/administration.html#cfssl-issuer-plugin
and
https://github.com/Netflix/lemur/blob/master/lemur/plugins/lemur_cfssl/plugin.py#L52
> camel casing to underscore conversion
Could you explain this a little more, what did you change in the view.tpl.html?
> Are you setting CFSSL_INTERMEDIATE? See:
http://lemur.readthedocs.io/en/latest/administration.html#cfssl-issuer-plugin
and
https://github.com/Netflix/lemur/blob/master/lemur/plugins/lemur_cfssl/plugin.py#L52
Yes I have set that variable properly in **lemur.conf.py**. Please find the attached screenshot for the same.

> Could you explain this a little more, what did you change in the view.tpl.html?
I have made changes in **lemur/static/dist/angular/certificates/view/view.tpl.html** file. Not sure if they are valid but it solved my issue.
I changed the following line
`<strong>Not Before</strong> <span class="pull-right" uib-tooltip="{{ certificate.notBefore }}">{{ momentService.createMoment(certificate.notBefore) }}`
to
`<strong>Not Before</strong> <span class="pull-right" uib-tooltip="{{ certificate.notBefore }}">{{ momentService.createMoment(certificate.not_before) }}`
That's interesting... the response you sent over has a key of `notBefore`, not `not_before`; the JavaScript doesn't do much other than assign these values to an object. Are you on master or some other branch?
I am experiencing this issue as well. Let me know if there is information I can provide to help.

@RickB17 Can you confirm you are on master? Do you see the value key as `not_before` or is it `notBefore` in your JSON response?
@kevgliss Yes, I'm on master. It is being returned as ```not_before```.

Are there other keys with underscores, or just `not_after` and `not_before`? Sorry for all the questions, but I have not yet been able to reproduce this locally.
@kevgliss Looks like there are a few other keys. ```validity_end```, ```validity_start```, and ```path_length```


@kevgliss attached are the outputs from ```pip freeze``` from both environments (working and non-working). Lots of version differences between the environment that works and the one that doesn't.
**Working Environment**

[works.txt](https://github.com/Netflix/lemur/files/907468/works.txt)
**Non-Working Environment**

[no-works.txt](https://github.com/Netflix/lemur/files/907469/no-works.txt)
I mirrored your non-working environment and was unable to reproduce. Could you issue a `git pull` or a `git log` on your non-working environment to rule out any code changes.
```pip freeze``` for this specific instance (it's different than the original non-working one I posted, but is the same one that I shared the json responses from earlier:
[pip_freeze.txt](https://github.com/Netflix/lemur/files/907773/pip_freeze.txt)
```git log``` output
[git_log.txt](https://github.com/Netflix/lemur/files/907770/git_log.txt)
I get this same issue where the output of the API shows snake case rather than camel case as well. Trying to investigate it at the moment. I get this on both the master branch and the 0.5.0 release. The strange thing is that it seemed to be working for a while, showing camel case, and then it changed back to snake case. It seems like a bit of a strange one.
Edit:
I'm able to reproduce this using a fresh clone of: https://github.com/Netflix/lemur-docker
So it's as @RickB17 suggested: it's related to the version. When changing from `marshmallow v2.13.4` to `marshmallow 2.4.0` we see the API response camelized. Something seems to have changed in between versions that's caused it to no longer parse things correctly. I would recommend changing back to version `2.4.0` until we find out what's wrong, so that email notifications and views work correctly. This bug seems to affect mail templates as well.
Also worth mentioning I tried to upgrade to the latest version but it also has this problem.
Here is the PR if you agree: https://github.com/Netflix/lemur/pull/761
I've been able to confirm this issue with the docker image at the 0.5 tags as well as on master. I think the workaround is okay for now, but I'd like to try and determine the root cause. I'm still not able to replicate it anywhere other than that docker image; locally, production, etc. all seem to work fine for me with 2.13.4.
Unfortunately, my docker foo isn't quite up to par and it is quite annoying to troubleshoot this from within the docker container itself. What I have found so far is that when we see issues, we don't see any certificate data enter here:
https://github.com/Netflix/lemur/blob/master/lemur/common/schema.py#L90
We can, however, see other pieces of data correctly receive the serialization (notifications, roles, etc.,). This would lead me to believe that there is some interaction going on between marshmallow and the certificate schema (https://github.com/Netflix/lemur/blob/master/lemur/common/schema.py#L90) that I don't quite understand yet.
So I've narrowed it down to an issue that occurred between versions `2.10.0` -> `2.10.1`; the changelog mentions the following changes:
```
- Fix behavior when using validate.Equal(False) (#484). Thanks @pktangyue for reporting and thanks @tuukkamustonen for the fix.
- Fix strict behavior when errors are raised in pre_dump/post_dump processors (#521). Thanks @tvuotila for the catch and patch.
- Fix validation of nested fields on dumping (#528). Thanks again @tvuotila.
```
Will keep looking into it.
🎉 🎉 🎉 Found it 🎉 🎉 🎉
So between versions `2.10.0` and `2.10.1` it seems that when an error is raised with the schema, it will no longer run the `post_dump` function. An error is raised here because the default lemur email address, `lemur@nobody`, is not a valid email address; if it were `[email protected]`, this would not be an issue. So to fix it in your environment you need to set your lemur user to have a valid email address, otherwise all hell breaks loose 😈 .
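A minimal sketch of that behavior (assuming marshmallow 2.x semantics; the schema and field names here are illustrative, not Lemur's actual ones):

```
from datetime import datetime

from marshmallow import Schema, fields, post_dump


def camelize(key):
    head, *rest = key.split('_')
    return head + ''.join(part.title() for part in rest)


class CertSchema(Schema):
    not_before = fields.DateTime()
    owner = fields.Email()

    @post_dump
    def camelize_keys(self, data):
        # stand-in for Lemur's output hook that renames not_before -> notBefore;
        # per the finding above, marshmallow >= 2.10.1 skips this hook whenever
        # the dump produced field errors
        return {camelize(key): value for key, value in data.items()}


result = CertSchema().dump({'not_before': datetime(2017, 5, 7), 'owner': 'lemur@nobody'})
print(result.errors)  # {'owner': ['Not a valid email address.']}
print(result.data)    # keys stay snake_case because the hook never ran
```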
Note: this is only an issue if you've used the lemur user to create things.
@kevgliss would you be able to change the defaults?
@RickB17 you should now be able to fix it for yourself by just updating your lemur user to have a valid email.
Well this was timely.. thanks @treacher, modifying Lemur user to have an email address via web UI worked for me. 👍
Ah, that explains why I can't recreate it! Good find. | 2017-04-27T16:42:49 |
|
Netflix/lemur | 766 | Netflix__lemur-766 | [
"756"
] | 75787d20bc3a6a2ba45bb61a9f8bb4d64ad7ce9a | diff --git a/lemur/factory.py b/lemur/factory.py
--- a/lemur/factory.py
+++ b/lemur/factory.py
@@ -153,7 +153,7 @@ def configure_logging(app):
app.logger.addHandler(handler)
stream_handler = StreamHandler()
- stream_handler.setLevel(app.config.get('LOG_LEVEL'))
+ stream_handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))
app.logger.addHandler(stream_handler)
| Set lemur to log to stdout
When running lemur inside docker, I would like to have it log everything to `stdout` so that I can forward logs to splunk. At the moment `lemur.config.py` has a `LEMUR_LOG` parameter that expects a filename. Is there a way to configure lemur to log to stdout instead of a file?
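A hedged workaround sketch for `lemur.conf.py` in the meantime (assumes `/dev/stdout` is writable by the lemur process; `LEMUR_LOG` is the filename parameter mentioned above):

```
# lemur.conf.py (sketch)
LOG_LEVEL = "DEBUG"        # an explicit level avoids setLevel(None) on the stream handler
LEMUR_LOG = "/dev/stdout"  # workaround: point the file handler at stdout
```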
| Looks like it is configured to also stream to stdout but for some reason it does not have a predefined `LOG_LEVEL` like the file handler one: https://github.com/Netflix/lemur/blob/02991c70a9878f8ba9a10da13a18532affaa1cb5/lemur/factory.py#L155 | 2017-04-27T17:06:02 |
|
Netflix/lemur | 785 | Netflix__lemur-785 | [
"743"
] | 7257e791ff3aa9e4d02c051383b3320b2504afc9 | diff --git a/lemur/migrations/versions/5e680529b666_.py b/lemur/migrations/versions/5e680529b666_.py
--- a/lemur/migrations/versions/5e680529b666_.py
+++ b/lemur/migrations/versions/5e680529b666_.py
@@ -15,16 +15,12 @@
def upgrade():
- ### commands auto generated by Alembic - please adjust! ###
op.add_column('endpoints', sa.Column('sensitive', sa.Boolean(), nullable=True))
op.add_column('endpoints', sa.Column('source_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'endpoints', 'sources', ['source_id'], ['id'])
- ### end Alembic commands ###
def downgrade():
- ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'endpoints', type_='foreignkey')
op.drop_column('endpoints', 'source_id')
op.drop_column('endpoints', 'sensitive')
- ### end Alembic commands ###
\ No newline at end of file
diff --git a/lemur/migrations/versions/8ae67285ff14_.py b/lemur/migrations/versions/8ae67285ff14_.py
new file mode 100644
--- /dev/null
+++ b/lemur/migrations/versions/8ae67285ff14_.py
@@ -0,0 +1,24 @@
+"""Modifies the unique index for the certificate replacements
+
+Revision ID: 8ae67285ff14
+Revises: 5e680529b666
+Create Date: 2017-05-10 11:56:13.999332
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '8ae67285ff14'
+down_revision = '5e680529b666'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.drop_index('certificate_replacement_associations_ix')
+ op.create_index('certificate_replacement_associations_ix', 'certificate_replacement_associations', ['replaced_certificate_id', 'certificate_id'], unique=True)
+
+
+def downgrade():
+ op.drop_index('certificate_replacement_associations_ix')
+ op.create_index('certificate_replacement_associations_ix', 'certificate_replacement_associations', ['certificate_id', 'certificate_id'], unique=True)
diff --git a/lemur/models.py b/lemur/models.py
--- a/lemur/models.py
+++ b/lemur/models.py
@@ -53,7 +53,7 @@
ForeignKey('certificates.id', ondelete='cascade'))
)
-Index('certificate_replacement_associations_ix', certificate_replacement_associations.c.certificate_id, certificate_replacement_associations.c.certificate_id)
+Index('certificate_replacement_associations_ix', certificate_replacement_associations.c.replaced_certificate_id, certificate_replacement_associations.c.certificate_id)
roles_authorities = db.Table('roles_authorities',
Column('authority_id', Integer, ForeignKey('authorities.id')),
| Replacing multiple certs causes integrity error.
When creating a new cert, if you tell Lemur it will be replacing multiple certs, you get the following error:
> (psycopg2.IntegrityError) duplicate key value violates unique constraint "certificate_replacement_associations_ix" DETAIL: Key (certificate_id, certificate_id)=(XXXXX, XXXXX) already exists. [SQL: 'INSERT INTO certificate_replacement_associations (replaced_certificate_id, certificate_id) VALUES (%(replaced_certificate_id)s, %(certificate_id)s)'] [parameters: ({'replaced_certificate_id': YYYYY, 'certificate_id': XXXXX}, {'replaced_certificate_id': ZZZ, 'certificate_id': XXXXX})]
Either the UI should restrict the replace to a single cert or the DB model should support multiple.
| This is caused by the following unique constraint:
https://github.com/Netflix/lemur/blob/master/lemur/migrations/versions/412b22cb656a_.py#L37
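For reference, the fix in the patch above redeclares the index on the intended column pair; sketched:

```
from sqlalchemy import Index

# before the fix, both columns were certificate_id, so the unique index
# effectively covered certificate_id alone and a second replacement row
# for the same new certificate violated it (the migration recreates this
# index with unique=True)
Index('certificate_replacement_associations_ix',
      certificate_replacement_associations.c.replaced_certificate_id,
      certificate_replacement_associations.c.certificate_id)
```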
This shouldn't ever be triggered, as the certificate gets a new id upon creation and is only then associated. One possible explanation might be the way SQLAlchemy is evaluating the IDs; we might need to commit the certificate before attempting to associate the replaced certificates. | 2017-05-10T19:04:48 |
|
Netflix/lemur | 796 | Netflix__lemur-796 | [
"792"
] | 97dceb5623e243b185f261642c10bcb82dc91209 | diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py
--- a/lemur/domains/schemas.py
+++ b/lemur/domains/schemas.py
@@ -15,7 +15,7 @@
class DomainInputSchema(LemurInputSchema):
id = fields.Integer()
name = fields.String(required=True)
- sensitive = fields.Boolean()
+ sensitive = fields.Boolean(missing=False)
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
| Adding domain fails on unselectable "sensitive"
Client side

Server side
```
May 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function
May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post
May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])
May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web
May 12 09:05:48 lemur supervisord: lemur-web 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function
May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post
May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])
May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'
```
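A minimal sketch of why the patch above fixes this (`missing` is marshmallow's deserialization default, so the key is always present after `load()`; simplified from the real `LemurInputSchema`-based class):

```
from marshmallow import Schema, fields


class DomainInputSchema(Schema):
    name = fields.String(required=True)
    # when the client omits "sensitive", load() now fills in False instead
    # of leaving the key out, so data['sensitive'] no longer raises KeyError
    sensitive = fields.Boolean(missing=False)


data, errors = DomainInputSchema().load({'name': 'example.com'})
print(data)  # {'name': 'example.com', 'sensitive': False}
```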
| Made a PR for this at #795 -- but I can't search for the domain in the UI.
^^
This was just the python fix, didn't touch the UI part. | 2017-05-16T01:34:26 |
|
Netflix/lemur | 804 | Netflix__lemur-804 | [
"767"
] | 7ad471a8103823d92269b7268363aa70d69fb0d0 | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -164,17 +164,17 @@ def post(self):
algo = header_data['alg']
break
else:
- return dict(message='Key not found'), 403
+ return dict(message='Key not found'), 401
# validate your token based on the key it was signed with
try:
jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=args['clientId'])
except jwt.DecodeError:
- return dict(message='Token is invalid'), 403
+ return dict(message='Token is invalid'), 401
except jwt.ExpiredSignatureError:
- return dict(message='Token has expired'), 403
+ return dict(message='Token has expired'), 401
except jwt.InvalidTokenError:
- return dict(message='Token is invalid'), 403
+ return dict(message='Token is invalid'), 401
user_params = dict(access_token=access_token, schema='profile')
diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -271,7 +271,7 @@ def post(self, data=None):
data['creator'] = g.user
return service.create(**data)
- return dict(message="You are not authorized to use {0}".format(data['authority'].name)), 403
+ return dict(message="You are not authorized to use the authority: {0}".format(data['authority'].name)), 403
class CertificatesUpload(AuthenticatedResource):
| Return error other than 401 when a user doesn't have access to issuer.
Right now we return a 401 when a user attempts to use an issuer they don't have access to. During a clone operation, if an issuer is no longer available, this status code sends the user into a login redirect loop.
| 2017-05-18T20:13:14 |
||
Netflix/lemur | 805 | Netflix__lemur-805 | [
"277"
] | 307a73c75282d7281ff136c2e6ad5b74d271c205 | diff --git a/lemur/plugins/lemur_aws/iam.py b/lemur/plugins/lemur_aws/iam.py
--- a/lemur/plugins/lemur_aws/iam.py
+++ b/lemur/plugins/lemur_aws/iam.py
@@ -53,7 +53,7 @@ def create_arn_from_cert(account_number, region, certificate_name):
@sts_client('iam')
@retry(retry_on_exception=retry_throttled, stop_max_attempt_number=7, wait_exponential_multiplier=100)
-def upload_cert(name, body, private_key, cert_chain=None, **kwargs):
+def upload_cert(name, body, private_key, path, cert_chain=None, **kwargs):
"""
Upload a certificate to AWS
@@ -61,12 +61,14 @@ def upload_cert(name, body, private_key, cert_chain=None, **kwargs):
:param body:
:param private_key:
:param cert_chain:
+ :param path:
:return:
"""
client = kwargs.pop('client')
try:
if cert_chain:
return client.upload_server_certificate(
+ Path=path,
ServerCertificateName=name,
CertificateBody=str(body),
PrivateKey=str(private_key),
@@ -74,6 +76,7 @@ def upload_cert(name, body, private_key, cert_chain=None, **kwargs):
)
else:
return client.upload_server_certificate(
+ Path=path,
ServerCertificateName=name,
CertificateBody=str(body),
PrivateKey=str(private_key)
diff --git a/lemur/plugins/lemur_aws/plugin.py b/lemur/plugins/lemur_aws/plugin.py
--- a/lemur/plugins/lemur_aws/plugin.py
+++ b/lemur/plugins/lemur_aws/plugin.py
@@ -161,6 +161,12 @@ class AWSDestinationPlugin(DestinationPlugin):
'required': True,
'validation': '/^[0-9]{12,12}$/',
'helpMessage': 'Must be a valid AWS account number!',
+ },
+ {
+ 'name': 'path',
+ 'type': 'str',
+ 'default': '/',
+ 'helpMessage': 'Path to upload certificate.'
}
]
@@ -172,6 +178,7 @@ class AWSDestinationPlugin(DestinationPlugin):
def upload(self, name, body, private_key, cert_chain, options, **kwargs):
iam.upload_cert(name, body, private_key,
+ self.get_option('path', options),
cert_chain=cert_chain,
account_number=self.get_option('accountNumber', options))
| Allow aws plugin to upload certificates to cloudfront.
Only real change needed is the ability to set the "path" parameter when uploading the certificate to IAM.
I would envision this as an option for the AWS destination plugin, with explicit sources for certificates. Additionally, we may need to modify the name of the cloudfront certificate if the certificate is used by both ELB and cloudfront, as there will be a name collision otherwise.
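A sketch of the eventual IAM call (the `Path` argument is boto3's `upload_server_certificate` parameter; `/cloudfront/` is the path prefix CloudFront requires, and the name/PEM variables are placeholders):

```
import boto3

client = boto3.client('iam')
client.upload_server_certificate(
    Path='/cloudfront/',         # certificates under /cloudfront/ are visible to CloudFront
    ServerCertificateName=name,  # placeholder name and PEM strings
    CertificateBody=body,
    PrivateKey=private_key,
    CertificateChain=cert_chain,
)
```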
| 2017-05-18T20:42:07 |
||
Netflix/lemur | 818 | Netflix__lemur-818 | [
"816"
] | 249ab23df4ea78eda1abfefd8a2bdb065c5f7a88 | diff --git a/lemur/common/fields.py b/lemur/common/fields.py
--- a/lemur/common/fields.py
+++ b/lemur/common/fields.py
@@ -327,7 +327,12 @@ def _serialize(self, value, attr, obj):
name_type = 'DNSName'
elif isinstance(name, x509.IPAddress):
- name_type = 'IPAddress'
+ if isinstance(value, ipaddress.IPv4Network):
+ name_type = 'IPNetwork'
+ else:
+ name_type = 'IPAddress'
+
+ value = str(value)
elif isinstance(name, x509.UniformResourceIdentifier):
name_type = 'uniformResourceIdentifier'
| TypeError: IPv4Address('192.168.0.1') is not JSON serializable
This happens during creation of a new certificate with a Subject Alt Name IPAddress:
```
2017-05-25 16:48:16,503 ERROR: Exception on /api/1/certificates [GET] [in /opt/lemur/lib/python3.4/site-packages/flask/app.py:1560]
Traceback (most recent call last):
  File "/opt/lemur/lib/python3.4/site-packages/flask/app.py", line 1612, in full_dispatch_request
    rv = self.dispatch_request()
  File "/opt/lemur/lib/python3.4/site-packages/flask/app.py", line 1598, in dispatch_request
    return self.view_functions[rule.endpoint](**req.view_args)
  File "/opt/lemur/lib/python3.4/site-packages/flask_restful/__init__.py", line 481, in wrapper
    return self.make_response(data, code, headers=headers)
  File "/opt/lemur/lib/python3.4/site-packages/flask_restful/__init__.py", line 510, in make_response
    resp = self.representations[mediatype](data, *args, **kwargs)
  File "/opt/lemur/lib/python3.4/site-packages/flask_restful/representations/json.py", line 20, in output_json
    dumped = dumps(data, **settings) + "\n"
  File "/usr/lib64/python3.4/json/__init__.py", line 230, in dumps
    return _default_encoder.encode(obj)
  File "/usr/lib64/python3.4/json/encoder.py", line 192, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/usr/lib64/python3.4/json/encoder.py", line 250, in iterencode
    return _iterencode(o, 0)
  File "/usr/lib64/python3.4/json/encoder.py", line 173, in default
    raise TypeError(repr(o) + " is not JSON serializable")
TypeError: IPv4Address('192.168.0.1') is not JSON serializable
```

| 2017-05-26T17:38:36 |
||
Netflix/lemur | 924 | Netflix__lemur-924 | [
"923"
] | ba29bbe3beb909d1291e132c424530d4391ae9a7 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -251,7 +251,7 @@ def run(self, password):
days=days
))
- policy_service.create(days=days)
+ policy_service.create(days=days, name='default')
sys.stdout.write("[/] Done!\n")
| Missing 'default' rotation policy
When trying to create a certificate, the following error message is displayed:
`{"_schema":"Unable to find <class 'lemur.policies.models.RotationPolicy'> with name: default"}`
| 2017-09-15T08:55:27 |
||
Netflix/lemur | 1,030 | Netflix__lemur-1030 | [
"990"
] | 7b8df16c9e1d71476ca99bcf995fc175117f8d5e | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -7,7 +7,6 @@
"""
import jwt
import base64
-import sys
import requests
from flask import Blueprint, current_app
@@ -28,6 +27,173 @@
api = Api(mod)
+def exchange_for_access_token(code, redirect_uri, client_id, secret, access_token_url=None, verify_cert=True):
+ """
+ Exchanges authorization code for access token.
+
+ :param code:
+ :param redirect_uri:
+ :param client_id:
+ :param secret:
+ :param access_token_url:
+ :param verify_cert:
+ :return:
+ :return:
+ """
+ # take the information we have received from the provider to create a new request
+ params = {
+ 'grant_type': 'authorization_code',
+ 'scope': 'openid email profile address',
+ 'code': code,
+ 'redirect_uri': redirect_uri,
+ 'client_id': client_id
+ }
+
+ # the secret and cliendId will be given to you when you signup for the provider
+ token = '{0}:{1}'.format(client_id, secret)
+
+ basic = base64.b64encode(bytes(token, 'utf-8'))
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'authorization': 'basic {0}'.format(basic.decode('utf-8'))
+ }
+
+ # exchange authorization code for access token.
+ r = requests.post(access_token_url, headers=headers, params=params, verify=verify_cert)
+ if r.status_code == 400:
+ r = requests.post(access_token_url, headers=headers, data=params, verify=verify_cert)
+ id_token = r.json()['id_token']
+ access_token = r.json()['access_token']
+
+ return id_token, access_token
+
+
+def validate_id_token(id_token, client_id, jwks_url):
+ """
+ Ensures that the token we receive is valid.
+
+ :param id_token:
+ :param client_id:
+ :param jwks_url:
+ :return:
+ """
+ # fetch token public key
+ header_data = fetch_token_header(id_token)
+
+ # retrieve the key material as specified by the token header
+ r = requests.get(jwks_url)
+ for key in r.json()['keys']:
+ if key['kid'] == header_data['kid']:
+ secret = get_rsa_public_key(key['n'], key['e'])
+ algo = header_data['alg']
+ break
+ else:
+ return dict(message='Key not found'), 401
+
+ # validate your token based on the key it was signed with
+ try:
+ jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=client_id)
+ except jwt.DecodeError:
+ return dict(message='Token is invalid'), 401
+ except jwt.ExpiredSignatureError:
+ return dict(message='Token has expired'), 401
+ except jwt.InvalidTokenError:
+ return dict(message='Token is invalid'), 401
+
+
+def retrieve_user(user_api_url, access_token):
+ """
+ Fetch user information from provided user api_url.
+
+ :param user_api_url:
+ :param access_token:
+ :return:
+ """
+ user_params = dict(access_token=access_token, schema='profile')
+
+ # retrieve information about the current user.
+ r = requests.get(user_api_url, params=user_params)
+ profile = r.json()
+
+ user = user_service.get_by_email(profile['email'])
+ metrics.send('successful_login', 'counter', 1)
+ return user, profile
+
+
+def create_user_roles(profile):
+ """Creates new roles based on profile information.
+
+ :param profile:
+ :return:
+ """
+ roles = []
+
+ # update their google 'roles'
+ for group in profile['googleGroups']:
+ role = role_service.get_by_name(group)
+ if not role:
+ role = role_service.create(group, description='This is a google group based role created by Lemur', third_party=True)
+ if not role.third_party:
+ role = role_service.set_third_party(role.id, third_party_status=True)
+ roles.append(role)
+
+ role = role_service.get_by_name(profile['email'])
+
+ if not role:
+ role = role_service.create(profile['email'], description='This is a user specific role', third_party=True)
+ if not role.third_party:
+ role = role_service.set_third_party(role.id, third_party_status=True)
+
+ roles.append(role)
+
+ # every user is an operator (tied to a default role)
+ if current_app.config.get('LEMUR_DEFAULT_ROLE'):
+ default = role_service.get_by_name(current_app.config['LEMUR_DEFAULT_ROLE'])
+ if not default:
+ default = role_service.create(current_app.config['LEMUR_DEFAULT_ROLE'], description='This is the default Lemur role.')
+ if not default.third_party:
+ role_service.set_third_party(default.id, third_party_status=True)
+ roles.append(default)
+
+ return roles
+
+
+def update_user(user, profile, roles):
+ """Updates user with current profile information and associated roles.
+
+ :param user:
+ :param profile:
+ :param roles:
+ """
+
+ # if we get an sso user create them an account
+ if not user:
+ user = user_service.create(
+ profile['email'],
+ get_psuedo_random_string(),
+ profile['email'],
+ True,
+ profile.get('thumbnailPhotoUrl'),
+ roles
+ )
+
+ else:
+ # we add 'lemur' specific roles, so they do not get marked as removed
+ for ur in user.roles:
+ if not ur.third_party:
+ roles.append(ur)
+
+ # update any changes to the user
+ user_service.update(
+ user.id,
+ profile['email'],
+ profile['email'],
+ True,
+ profile.get('thumbnailPhotoUrl'), # profile isn't google+ enabled
+ roles
+ )
+
+
class Login(Resource):
"""
Provides an endpoint for Lemur's basic authentication. It takes a username and password
@@ -140,126 +306,70 @@ def __init__(self):
self.reqparse = reqparse.RequestParser()
super(Ping, self).__init__()
- def post(self):
- self.reqparse.add_argument('clientId', type=str, required=True, location='json')
- self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
- self.reqparse.add_argument('code', type=str, required=True, location='json')
-
+ def get(self):
+ self.reqparse.add_argument('code', type=str, required=True, location='args')
args = self.reqparse.parse_args()
- # take the information we have received from the provider to create a new request
- params = {
- 'client_id': args['clientId'],
- 'grant_type': 'authorization_code',
- 'scope': 'openid email profile address',
- 'redirect_uri': args['redirectUri'],
- 'code': args['code']
- }
-
# you can either discover these dynamically or simply configure them
access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL')
user_api_url = current_app.config.get('PING_USER_API_URL')
+ client_id = current_app.config.get('PING_CLIENT_ID')
+ redirect_url = current_app.config.get('PING_REDIRECT_URI')
- # the secret and cliendId will be given to you when you signup for the provider
- token = '{0}:{1}'.format(args['clientId'], current_app.config.get("PING_SECRET"))
+ secret = current_app.config.get('PING_SECRET')
- basic = base64.b64encode(bytes(token, 'utf-8'))
- headers = {'authorization': 'basic {0}'.format(basic.decode('utf-8'))}
+ id_token, access_token = exchange_for_access_token(
+ args['code'],
+ redirect_url,
+ client_id,
+ secret,
+ access_token_url=access_token_url
+ )
- # exchange authorization code for access token.
+ jwks_url = current_app.config.get('PING_JWKS_URL')
+ validate_id_token(id_token, args['clientId'], jwks_url)
- r = requests.post(access_token_url, headers=headers, params=params)
- id_token = r.json()['id_token']
- access_token = r.json()['access_token']
+ user, profile = retrieve_user(user_api_url, access_token)
+ roles = create_user_roles(profile)
+ update_user(user, profile, roles)
- # fetch token public key
- header_data = fetch_token_header(id_token)
- jwks_url = current_app.config.get('PING_JWKS_URL')
+ if not user.active:
+ metrics.send('invalid_login', 'counter', 1)
+ return dict(message='The supplied credentials are invalid'), 403
- # retrieve the key material as specified by the token header
- r = requests.get(jwks_url)
- for key in r.json()['keys']:
- if key['kid'] == header_data['kid']:
- secret = get_rsa_public_key(key['n'], key['e'])
- algo = header_data['alg']
- break
- else:
- return dict(message='Key not found'), 401
-
- # validate your token based on the key it was signed with
- try:
- jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=args['clientId'])
- except jwt.DecodeError:
- return dict(message='Token is invalid'), 401
- except jwt.ExpiredSignatureError:
- return dict(message='Token has expired'), 401
- except jwt.InvalidTokenError:
- return dict(message='Token is invalid'), 401
-
- user_params = dict(access_token=access_token, schema='profile')
-
- # retrieve information about the current user.
- r = requests.get(user_api_url, params=user_params)
- profile = r.json()
+ # Tell Flask-Principal the identity changed
+ identity_changed.send(current_app._get_current_object(), identity=Identity(user.id))
- user = user_service.get_by_email(profile['email'])
metrics.send('successful_login', 'counter', 1)
+ return dict(token=create_token(user))
- # update their google 'roles'
- roles = []
+ def post(self):
+ self.reqparse.add_argument('clientId', type=str, required=True, location='json')
+ self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
+ self.reqparse.add_argument('code', type=str, required=True, location='json')
- for group in profile['googleGroups']:
- role = role_service.get_by_name(group)
- if not role:
- role = role_service.create(group, description='This is a google group based role created by Lemur', third_party=True)
- if not role.third_party:
- role = role_service.set_third_party(role.id, third_party_status=True)
- roles.append(role)
+ args = self.reqparse.parse_args()
- role = role_service.get_by_name(profile['email'])
+ # you can either discover these dynamically or simply configure them
+ access_token_url = current_app.config.get('PING_ACCESS_TOKEN_URL')
+ user_api_url = current_app.config.get('PING_USER_API_URL')
- if not role:
- role = role_service.create(profile['email'], description='This is a user specific role', third_party=True)
- if not role.third_party:
- role = role_service.set_third_party(role.id, third_party_status=True)
+ secret = current_app.config.get('PING_SECRET')
- roles.append(role)
+ id_token, access_token = exchange_for_access_token(
+ args['code'],
+ args['redirectUri'],
+ args['clientId'],
+ secret,
+ access_token_url=access_token_url
+ )
- # every user is an operator (tied to a default role)
- if current_app.config.get('LEMUR_DEFAULT_ROLE'):
- default = role_service.get_by_name(current_app.config['LEMUR_DEFAULT_ROLE'])
- if not default:
- default = role_service.create(current_app.config['LEMUR_DEFAULT_ROLE'], description='This is the default Lemur role.')
- if not default.third_party:
- role_service.set_third_party(default.id, third_party_status=True)
- roles.append(default)
-
- # if we get an sso user create them an account
- if not user:
- user = user_service.create(
- profile['email'],
- get_psuedo_random_string(),
- profile['email'],
- True,
- profile.get('thumbnailPhotoUrl'),
- roles
- )
+ jwks_url = current_app.config.get('PING_JWKS_URL')
+ validate_id_token(id_token, args['clientId'], jwks_url)
- else:
- # we add 'lemur' specific roles, so they do not get marked as removed
- for ur in user.roles:
- if not ur.third_party:
- roles.append(ur)
-
- # update any changes to the user
- user_service.update(
- user.id,
- profile['email'],
- profile['email'],
- True,
- profile.get('thumbnailPhotoUrl'), # profile isn't google+ enabled
- roles
- )
+ user, profile = retrieve_user(user_api_url, access_token)
+ roles = create_user_roles(profile)
+ update_user(user, profile, roles)
if not user.active:
metrics.send('invalid_login', 'counter', 1)
@@ -277,133 +387,76 @@ def __init__(self):
self.reqparse = reqparse.RequestParser()
super(OAuth2, self).__init__()
- def post(self):
- self.reqparse.add_argument('clientId', type=str, required=True, location='json')
- self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
- self.reqparse.add_argument('code', type=str, required=True, location='json')
-
+ def get(self):
+ self.reqparse.add_argument('code', type=str, required=True, location='args')
args = self.reqparse.parse_args()
- # take the information we have received from the provider to create a new request
- params = {
- 'grant_type': 'authorization_code',
- 'scope': 'openid email profile groups',
- 'redirect_uri': args['redirectUri'],
- 'code': args['code'],
- }
-
# you can either discover these dynamically or simply configure them
access_token_url = current_app.config.get('OAUTH2_ACCESS_TOKEN_URL')
user_api_url = current_app.config.get('OAUTH2_USER_API_URL')
- verify_cert = current_app.config.get('OAUTH2_VERIFY_CERT', True)
+ verify_cert = current_app.config.get('OAUTH2_VERIFY_CERT')
- # the secret and cliendId will be given to you when you signup for the provider
- token = '{0}:{1}'.format(args['clientId'], current_app.config.get("OAUTH2_SECRET"))
+ secret = current_app.config.get('OAUTH2_SECRET')
- basic = base64.b64encode(bytes(token, 'utf-8'))
+ id_token, access_token = exchange_for_access_token(
+ args['code'],
+ args['redirectUri'],
+ args['clientId'],
+ secret,
+ access_token_url=access_token_url,
+ verify_cert=verify_cert
+ )
- headers = {
- 'Content-Type': 'application/x-www-form-urlencoded',
- 'authorization': 'basic {0}'.format(basic.decode('utf-8'))
- }
+ jwks_url = current_app.config.get('PING_JWKS_URL')
+ validate_id_token(id_token, args['clientId'], jwks_url)
- # exchange authorization code for access token.
- # Try Params first
- r = requests.post(access_token_url, headers=headers, params=params, verify=verify_cert)
- if r.status_code == 400:
- r = requests.post(access_token_url, headers=headers, data=params, verify=verify_cert)
- id_token = r.json()['id_token']
- access_token = r.json()['access_token']
-
- # fetch token public key
- header_data = fetch_token_header(id_token)
- jwks_url = current_app.config.get('OAUTH2_JWKS_URL')
-
- # retrieve the key material as specified by the token header
- r = requests.get(jwks_url, verify=verify_cert)
- for key in r.json()['keys']:
- if key['kid'] == header_data['kid']:
- secret = get_rsa_public_key(key['n'], key['e'])
- algo = header_data['alg']
- break
- else:
- return dict(message='Key not found'), 401
-
- # validate your token based on the key it was signed with
- try:
- if sys.version_info >= (3, 0):
- jwt.decode(id_token, secret.decode('utf-8'), algorithms=[algo], audience=args['clientId'])
- else:
- jwt.decode(id_token, secret, algorithms=[algo], audience=args['clientId'])
- except jwt.DecodeError:
- return dict(message='Token is invalid'), 401
- except jwt.ExpiredSignatureError:
- return dict(message='Token has expired'), 401
- except jwt.InvalidTokenError:
- return dict(message='Token is invalid'), 401
-
- headers = {'authorization': 'Bearer {0}'.format(access_token)}
-
- # retrieve information about the current user.
- r = requests.get(user_api_url, headers=headers, verify=verify_cert)
- profile = r.json()
+ user, profile = retrieve_user(user_api_url, access_token)
+ roles = create_user_roles(profile)
+ update_user(user, profile, roles)
+
+ if not user.active:
+ metrics.send('invalid_login', 'counter', 1)
+ return dict(message='The supplied credentials are invalid'), 403
+
+ # Tell Flask-Principal the identity changed
+ identity_changed.send(current_app._get_current_object(), identity=Identity(user.id))
- user = user_service.get_by_email(profile['email'])
metrics.send('successful_login', 'counter', 1)
+ return dict(token=create_token(user))
- # update with roles sent by identity provider
- roles = []
+ def post(self):
+ self.reqparse.add_argument('clientId', type=str, required=True, location='json')
+ self.reqparse.add_argument('redirectUri', type=str, required=True, location='json')
+ self.reqparse.add_argument('code', type=str, required=True, location='json')
- if 'roles' in profile:
- for group in profile['roles']:
- role = role_service.get_by_name(group)
- if not role:
- role = role_service.create(group, description='This is a group configured by identity provider', third_party=True)
- if not role.third_party:
- role = role_service.set_third_party(role.id, third_party_status=True)
- roles.append(role)
+ args = self.reqparse.parse_args()
- role = role_service.get_by_name(profile['email'])
- if not role:
- role = role_service.create(profile['email'], description='This is a user specific role', third_party=True)
- if not role.third_party:
- role = role_service.set_third_party(role.id, third_party_status=True)
- roles.append(role)
+ # you can either discover these dynamically or simply configure them
+ access_token_url = current_app.config.get('OAUTH2_ACCESS_TOKEN_URL')
+ user_api_url = current_app.config.get('OAUTH2_USER_API_URL')
+ verify_cert = current_app.config.get('OAUTH2_VERIFY_CERT')
- # if we get an sso user create them an account
- if not user:
- # every user is an operator (tied to a default role)
- if current_app.config.get('LEMUR_DEFAULT_ROLE'):
- v = role_service.get_by_name(current_app.config.get('LEMUR_DEFAULT_ROLE'))
- if not v.third_party:
- v = role_service.set_third_party(v.id, third_party_status=True)
- if v:
- roles.append(v)
-
- user = user_service.create(
- profile['name'],
- get_psuedo_random_string(),
- profile['email'],
- True,
- profile.get('thumbnailPhotoUrl'),
- roles
- )
+ secret = current_app.config.get('OAUTH2_SECRET')
- else:
- # we add 'lemur' specific roles, so they do not get marked as removed
- for ur in user.roles:
- if not ur.third_party:
- roles.append(ur)
-
- # update any changes to the user
- user_service.update(
- user.id,
- profile['name'],
- profile['email'],
- True,
- profile.get('thumbnailPhotoUrl'), # incase profile isn't google+ enabled
- roles
- )
+ id_token, access_token = exchange_for_access_token(
+ args['code'],
+ args['redirectUri'],
+ args['clientId'],
+ secret,
+ access_token_url=access_token_url,
+ verify_cert=verify_cert
+ )
+
+ jwks_url = current_app.config.get('PING_JWKS_URL')
+ validate_id_token(id_token, args['clientId'], jwks_url)
+
+ user, profile = retrieve_user(user_api_url, access_token)
+ roles = create_user_roles(profile)
+ update_user(user, profile, roles)
+
+ if not user.active:
+ metrics.send('invalid_login', 'counter', 1)
+ return dict(message='The supplied credentials are invalid'), 403
# Tell Flask-Principal the identity changed
identity_changed.send(current_app._get_current_object(), identity=Identity(user.id))
| diff --git a/lemur/tests/test_certificates.py b/lemur/tests/test_certificates.py
--- a/lemur/tests/test_certificates.py
+++ b/lemur/tests/test_certificates.py
@@ -230,7 +230,7 @@ def test_certificate_valid_years(client, authority):
'owner': '[email protected]',
'authority': {'id': authority.id},
'description': 'testtestest',
- 'validityYears': 2
+ 'validityYears': 1
}
data, errors = CertificateInputSchema().load(input_data)
| Allow both GET and POST for auth endpoints
Whether GET or POST is sent to our endpoints, we should correctly handle the callback.
This is particularly a problem on newer versions of Firefox.
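A sketch of the shape of the change, following the pattern the patch above uses (the same resource parses `code` from the query string on GET and from the JSON body on POST; `_handle_callback` is a hypothetical shared handler, not Lemur's actual method):

```
from flask_restful import Resource, reqparse


class OAuth2(Resource):
    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument('code', type=str, required=True, location='args')
        return self._handle_callback(parser.parse_args())

    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument('code', type=str, required=True, location='json')
        return self._handle_callback(parser.parse_args())
```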
| 2018-01-02T02:59:32 |
|
Netflix/lemur | 1,170 | Netflix__lemur-1170 | [
"969"
] | fb494bc32a189316587d40ab2e2d234df4357ace | diff --git a/lemur/plugins/lemur_sftp/__init__.py b/lemur/plugins/lemur_sftp/__init__.py
new file mode 100644
--- /dev/null
+++ b/lemur/plugins/lemur_sftp/__init__.py
@@ -0,0 +1,5 @@
+try:
+ VERSION = __import__('pkg_resources') \
+ .get_distribution(__name__).version
+except Exception as e:
+ VERSION = 'unknown'
diff --git a/lemur/plugins/lemur_sftp/plugin.py b/lemur/plugins/lemur_sftp/plugin.py
new file mode 100644
--- /dev/null
+++ b/lemur/plugins/lemur_sftp/plugin.py
@@ -0,0 +1,179 @@
+"""
+.. module: lemur.plugins.lemur_sftp.plugin
+ :platform: Unix
+ :synopsis: Allow the uploading of certificates to SFTP.
+ :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
+ :license: Apache, see LICENSE for more details.
+
+ Allow the uploading of certificates to SFTP.
+
+ NGINX and Apache export formats are supported.
+
+ Password and RSA private key are supported.
+ Passwords are not encrypted and stored as a plain text.
+
+ Detailed logging when Lemur debug mode is enabled.
+
+.. moduleauthor:: Dmitry Zykov https://github.com/DmitryZykov
+"""
+
+import paramiko
+
+from flask import current_app
+from lemur.plugins import lemur_sftp
+from lemur.common.defaults import common_name
+from lemur.common.utils import parse_certificate
+from lemur.plugins.bases import DestinationPlugin
+
+
+class SFTPDestinationPlugin(DestinationPlugin):
+ title = 'SFTP'
+ slug = 'sftp-destination'
+ description = 'Allow the uploading of certificates to SFTP'
+ version = lemur_sftp.VERSION
+
+ author = 'Dmitry Zykov'
+ author_url = 'https://github.com/DmitryZykov'
+
+ options = [
+ {
+ 'name': 'host',
+ 'type': 'str',
+ 'required': True,
+ 'helpMessage': 'The SFTP host.'
+ },
+ {
+ 'name': 'port',
+ 'type': 'int',
+ 'required': True,
+ 'helpMessage': 'The SFTP port, default is 22.',
+ 'validation': '^(6553[0-5]|655[0-2][0-9]\d|65[0-4](\d){2}|6[0-4](\d){3}|[1-5](\d){4}|[1-9](\d){0,3})',
+ 'default': '22'
+ },
+ {
+ 'name': 'user',
+ 'type': 'str',
+ 'required': True,
+ 'helpMessage': 'The SFTP user. Default is root.',
+ 'default': 'root'
+ },
+ {
+ 'name': 'password',
+ 'type': 'str',
+ 'required': False,
+ 'helpMessage': 'The SFTP password (optional when the private key is used).',
+ 'default': None
+ },
+ {
+ 'name': 'privateKeyPath',
+ 'type': 'str',
+ 'required': False,
+ 'helpMessage': 'The path to the RSA private key on the Lemur server (optional).',
+ 'default': None
+ },
+ {
+ 'name': 'privateKeyPass',
+ 'type': 'str',
+ 'required': False,
+ 'helpMessage': 'The password for the encrypted RSA private key (optional).',
+ 'default': None
+ },
+ {
+ 'name': 'destinationPath',
+ 'type': 'str',
+ 'required': True,
+ 'helpMessage': 'The SFTP path where certificates will be uploaded.',
+ 'default': '/etc/nginx/certs'
+ },
+ {
+ 'name': 'exportFormat',
+ 'required': True,
+ 'value': 'NGINX',
+ 'helpMessage': 'The export format for certificates.',
+ 'type': 'select',
+ 'available': [
+ 'NGINX',
+ 'Apache'
+ ]
+ }
+ ]
+
+ def upload(self, name, body, private_key, cert_chain, options, **kwargs):
+
+ current_app.logger.debug('SFTP destination plugin is started')
+
+ cn = common_name(parse_certificate(body))
+ host = self.get_option('host', options)
+ port = self.get_option('port', options)
+ user = self.get_option('user', options)
+ password = self.get_option('password', options)
+ ssh_priv_key = self.get_option('privateKeyPath', options)
+ ssh_priv_key_pass = self.get_option('privateKeyPass', options)
+ dst_path = self.get_option('destinationPath', options)
+ export_format = self.get_option('exportFormat', options)
+
+ # prepare files for upload
+ files = {cn + '.key': private_key,
+ cn + '.pem': body}
+
+ if cert_chain:
+ if export_format == 'NGINX':
+ # assemble body + chain in the single file
+ files[cn + '.pem'] += '\n' + cert_chain
+
+ elif export_format == 'Apache':
+ # store chain in the separate file
+ files[cn + '.ca.bundle.pem'] = cert_chain
+
+ # upload files
+ try:
+ current_app.logger.debug('Connecting to {0}@{1}:{2}'.format(user, host, port))
+ ssh = paramiko.SSHClient()
+
+ # allow connection to the new unknown host
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ # open the ssh connection
+ if password:
+ current_app.logger.debug('Using password')
+ ssh.connect(host, username=user, port=port, password=password)
+ elif ssh_priv_key:
+ current_app.logger.debug('Using RSA private key')
+ pkey = paramiko.RSAKey.from_private_key_file(ssh_priv_key, ssh_priv_key_pass)
+ ssh.connect(host, username=user, port=port, pkey=pkey)
+ else:
+ current_app.logger.error("No password or private key provided. Can't proceed")
+ raise paramiko.ssh_exception.AuthenticationException
+
+ # open the sftp session inside the ssh connection
+ sftp = ssh.open_sftp()
+
+ # make sure that the destination path exist
+ try:
+ current_app.logger.debug('Creating {0}'.format(dst_path))
+ sftp.mkdir(dst_path)
+ except IOError:
+ current_app.logger.debug('{0} already exist, resuming'.format(dst_path))
+ try:
+ dst_path_cn = dst_path + '/' + cn
+ current_app.logger.debug('Creating {0}'.format(dst_path_cn))
+ sftp.mkdir(dst_path_cn)
+ except IOError:
+ current_app.logger.debug('{0} already exist, resuming'.format(dst_path_cn))
+
+ # upload certificate files to the sftp destination
+ for filename, data in files.items():
+ current_app.logger.debug('Uploading {0} to {1}'.format(filename, dst_path_cn))
+ with sftp.open(dst_path_cn + '/' + filename, 'w') as f:
+ f.write(data)
+ # read only for owner, -r--------
+ sftp.chmod(dst_path_cn + '/' + filename, 0o400)
+
+ ssh.close()
+
+ except Exception as e:
+ current_app.logger.error('ERROR in {0}: {1}'.format(e.__class__, e))
+ try:
+ ssh.close()
+ except BaseException:
+ pass
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -147,7 +147,8 @@ def run(self):
'digicert_issuer = lemur.plugins.lemur_digicert.plugin:DigiCertIssuerPlugin',
'digicert_cis_issuer = lemur.plugins.lemur_digicert.plugin:DigiCertCISIssuerPlugin',
'digicert_cis_source = lemur.plugins.lemur_digicert.plugin:DigiCertCISSourcePlugin',
- 'csr_export = lemur.plugins.lemur_csr.plugin:CSRExportPlugin'
+ 'csr_export = lemur.plugins.lemur_csr.plugin:CSRExportPlugin',
+ 'sftp_destination = lemur.plugins.lemur_sftp.plugin:SFTPDestinationPlugin'
],
},
classifiers=[
| Destination: Linux Host: Fails to copy to host. "Private key file is encrypted"
Even when the private key password is provided in the destination plugin configuration, the copy fails. The workaround is to comment out the lines referenced below. The intention of these lines is to check whether a private key password is present and return None if one is not given.
https://github.com/Netflix/lemur/blob/e43268f58504bdecc98e591dc0c7f3d8731ec897/lemur/plugins/lemur_linuxdst/plugin.py#L79-L80
```
Private key file is encrypted
Traceback (most recent call last):
File "/www/lemur/lemur/certificates/models.py", line 355, in update_destinations
destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)
File "/www/lemur/lemur/plugins/lemur_linuxdst/plugin.py", line 82, in upload
remote_host.create_cert(name, dst_dir, export_type, dst_user, dst_priv, dst_priv_key, dst_host, int(dst_host_port))
File "/www/lemur/lemur/plugins/lemur_linuxdst/remote_host.py", line 65, in create_cert
copy_cert(lem_cert.cn, dst_user, dst_priv, dst_priv_key, dst_host, dst_host_port, dst_dir, dst_file, dst_data)
File "/www/lemur/lemur/plugins/lemur_linuxdst/remote_host.py", line 14, in copy_cert
priv_key = paramiko.RSAKey.from_private_key_file(dst_priv)
File "/www/lemur/lib/python3.5/site-packages/paramiko/pkey.py", line 205, in from_private_key_file
key = cls(filename=filename, password=password)
File "/www/lemur/lib/python3.5/site-packages/paramiko/rsakey.py", line 47, in __init__
self._from_private_key_file(filename, password)
File "/www/lemur/lib/python3.5/site-packages/paramiko/rsakey.py", line 172, in _from_private_key_file
data = self._read_private_key_file('RSA', filename, password)
File "/www/lemur/lib/python3.5/site-packages/paramiko/pkey.py", line 278, in _read_private_key_file
data = self._read_private_key(tag, f, password)
File "/www/lemur/lib/python3.5/site-packages/paramiko/pkey.py", line 325, in _read_private_key
raise PasswordRequiredException('Private key file is encrypted')
paramiko.ssh_exception.PasswordRequiredException: Private key file is encrypted
```
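The underlying paramiko call, sketched; passing the passphrase through instead of dropping it (as the traceback shows happening) avoids the exception:

```
import paramiko

# dst_priv / dst_priv_key are the plugin's key-path and passphrase options
pkey = paramiko.RSAKey.from_private_key_file(dst_priv, password=dst_priv_key)
```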
| 2018-04-03T11:41:36 |
||
Netflix/lemur | 1,187 | Netflix__lemur-1187 | [
"1072"
] | 0b5f85469cda856c01c964eb2059da4b3e1b0024 | diff --git a/lemur/plugins/lemur_digicert/plugin.py b/lemur/plugins/lemur_digicert/plugin.py
--- a/lemur/plugins/lemur_digicert/plugin.py
+++ b/lemur/plugins/lemur_digicert/plugin.py
@@ -157,7 +157,7 @@ def map_cis_fields(options, csr):
"csr": csr,
"signature_hash": signature_hash(options.get('signing_algorithm')),
"validity": {
- "valid_to": options['validity_end'].format('YYYY-MM-DD')
+ "valid_to": options['validity_end'].format('YYYY-MM-DDTHH:MM:SSZ')
},
"organization": {
"name": options['organization'],
| diff --git a/lemur/plugins/lemur_digicert/tests/test_digicert.py b/lemur/plugins/lemur_digicert/tests/test_digicert.py
--- a/lemur/plugins/lemur_digicert/tests/test_digicert.py
+++ b/lemur/plugins/lemur_digicert/tests/test_digicert.py
@@ -103,7 +103,7 @@ def test_map_cis_fields(app):
'signature_hash': 'sha256',
'organization': {'name': 'Example, Inc.', 'units': ['Example Org']},
'validity': {
- 'valid_to': arrow.get(2017, 5, 7).format('YYYY-MM-DD')
+ 'valid_to': arrow.get(2017, 5, 7).format('YYYY-MM-DDTHH:MM:SSZ')
},
'profile_name': None
}
@@ -132,7 +132,7 @@ def test_map_cis_fields(app):
'signature_hash': 'sha256',
'organization': {'name': 'Example, Inc.', 'units': ['Example Org']},
'validity': {
- 'valid_to': arrow.get(2018, 11, 3).format('YYYY-MM-DD')
+ 'valid_to': arrow.get(2018, 11, 3).format('YYYY-MM-DDTHH:MM:SSZ')
},
'profile_name': None
}
| Allow Digicert CIS to specify end time as well as end date
Currently we truncate the times sent to DigiCert; we should respect the times given to us:
https://github.com/Netflix/lemur/blob/master/lemur/plugins/lemur_digicert/plugin.py#L160
Valid format for `valid_to` is `YYYY-MM-DDTHH:MM:SSZ`
cc @andrewachen
| 2018-04-10T19:51:33 |
|
Netflix/lemur | 1,223 | Netflix__lemur-1223 | [
"1046"
] | 91500d10221963330fcee82f4212264e7475e2e7 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -251,12 +251,17 @@ def run(self, password):
recipients = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
notification_service.create_default_expiration_notifications("DEFAULT_SECURITY", recipients=recipients)
- days = current_app.config.get("LEMUR_DEFAULT_ROTATION_INTERVAL", 30)
- sys.stdout.write("[+] Creating default certificate rotation policy of {days} days before issuance.\n".format(
- days=days
- ))
+ _DEFAULT_ROTATION_INTERVAL = 'default'
+ default_rotation_interval = policy_service.get_by_name(_DEFAULT_ROTATION_INTERVAL)
+
+ if default_rotation_interval:
+ sys.stdout.write("[-] Default rotation interval policy already created, skipping...!\n")
+ else:
+ days = current_app.config.get("LEMUR_DEFAULT_ROTATION_INTERVAL", 30)
+ sys.stdout.write("[+] Creating default certificate rotation policy of {days} days before issuance.\n".format(
+ days=days))
+ policy_service.create(days=days, name=_DEFAULT_ROTATION_INTERVAL)
- policy_service.create(days=days, name='default')
sys.stdout.write("[/] Done!\n")
diff --git a/lemur/policies/service.py b/lemur/policies/service.py
--- a/lemur/policies/service.py
+++ b/lemur/policies/service.py
@@ -18,6 +18,15 @@ def get(policy_id):
return database.get(RotationPolicy, policy_id)
+def get_by_name(policy_name):
+ """
+ Retrieves policy by its name.
+ :param policy_name:
+ :return:
+ """
+ return database.get_all(RotationPolicy, policy_name, field='name').all()
+
+
def delete(policy_id):
"""
Delete a rotation policy.
| Running init more than once will create multiple 'default' rotation policies
When you run `init`, it checks for default roles, default users, etc. and skips creating them if those are found. However, the default rotation policy ('default': 30) is created and inserted into the database every time. This causes an issue when you try to create a certificate without a rotation policy, since the default rotation policy (named "default") is then used and there are now duplicates of it. This commonly occurs if you use lemur-docker to test, and then only restart the web container and keep the existing lemur-postgres container with the existing data.
The fix is to go into the DB and remove the duplicate; we should also check whether 'default' already exists before creating it (and the name should probably be set to unique as well).
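A cleanup sketch from `lemur shell`, using the `get_by_name` helper this patch introduces (assumes keeping the first matching row):

```
from lemur.policies import service as policy_service

policies = policy_service.get_by_name('default')
for duplicate in policies[1:]:
    policy_service.delete(duplicate.id)
```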
| This makes sense, rotation policy was added quite a bit after the init stuff was done. I may have missed checking for existence of the policy. | 2018-04-24T22:52:13 |
|
Netflix/lemur | 1,225 | Netflix__lemur-1225 | [
"1224"
] | ae638086782fa346d85b602310d19d39d82209e1 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -208,16 +208,16 @@ def run(self, password):
if operator_role:
sys.stdout.write("[-] Operator role already created, skipping...!\n")
else:
- # we create an admin role
+ # we create an operator role
operator_role = role_service.create('operator', description='This is the Lemur operator role.')
sys.stdout.write("[+] Created 'operator' role\n")
read_only_role = role_service.get_by_name('read-only')
if read_only_role:
- sys.stdout.write("[-] Operator role already created, skipping...!\n")
+ sys.stdout.write("[-] Read only role already created, skipping...!\n")
else:
- # we create an admin role
+ # we create an read only role
read_only_role = role_service.create('read-only', description='This is the Lemur read only role.')
sys.stdout.write("[+] Created 'read-only' role\n")
| Minor Comment Stdout Issues in InitializeApp()
| 2018-04-24T23:30:40 |
||
Netflix/lemur | 1,227 | Netflix__lemur-1227 | [
"1226"
] | e09b7eb97860b69e40493b3eb528e57e823dba30 | diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -237,9 +237,6 @@ def run(self, password):
else:
sys.stdout.write("[-] Default user has already been created, skipping...!\n")
- sys.stdout.write("[+] Creating expiration email notifications!\n")
- sys.stdout.write("[!] Using {0} as specified by LEMUR_SECURITY_TEAM_EMAIL for notifications\n".format("LEMUR_SECURITY_TEAM_EMAIL"))
-
intervals = current_app.config.get("LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS", [])
sys.stdout.write(
"[!] Creating {num} notifications for {intervals} days as specified by LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS\n".format(
@@ -249,6 +246,8 @@ def run(self, password):
)
recipients = current_app.config.get('LEMUR_SECURITY_TEAM_EMAIL')
+ sys.stdout.write("[+] Creating expiration email notifications!\n")
+ sys.stdout.write("[!] Using {0} as specified by LEMUR_SECURITY_TEAM_EMAIL for notifications\n".format(recipients))
notification_service.create_default_expiration_notifications("DEFAULT_SECURITY", recipients=recipients)
_DEFAULT_ROTATION_INTERVAL = 'default'
| Variable Name Printed Instead of Value
https://github.com/Netflix/lemur/blob/e09b7eb97860b69e40493b3eb528e57e823dba30/lemur/manage.py#L241
| 2018-04-26T15:47:37 |
||
Netflix/lemur | 1,231 | Netflix__lemur-1231 | [
"283"
] | 4da2f33892a2a014b91ec82a376e219d6170b33f | diff --git a/lemur/certificates/views.py b/lemur/certificates/views.py
--- a/lemur/certificates/views.py
+++ b/lemur/certificates/views.py
@@ -300,12 +300,14 @@ def post(self, data=None):
{
"owner": "[email protected]",
- "publicCert": "-----BEGIN CERTIFICATE-----...",
- "intermediateCert": "-----BEGIN CERTIFICATE-----...",
+ "body": "-----BEGIN CERTIFICATE-----...",
+ "chain": "-----BEGIN CERTIFICATE-----...",
"privateKey": "-----BEGIN RSA PRIVATE KEY-----..."
"destinations": [],
"notifications": [],
"replacements": [],
+ "roles": [],
+ "notify": true,
"name": "cert1"
}
| Uploading certificate
Hi Team,
How can we upload / import a certificate to Lemur using APIs? I am not that much good in python scripting. Could you please guide me how can we achieve this?
| Please see the documentation at:
http://lemur.readthedocs.org/en/latest/developer/index.html#lemur.certificates.views.CertificatesUpload
You can use whatever language would like to talk to the API.
Hi Kevgliss,
Thanks for sharing the documentation, but unfortunately I am not that much familiar with python scripting. As a first step I tried to create a script for Authentication (http://lemur.readthedocs.org/en/latest/developer/index.html#post--auth-login). I am getting an output of 405. Could you please let me know how can we fix the issue with the script first and later will trying with uploading.
**Script created**
#!/usr/bin/python
import requests
response = requests.request("POST",
"http://10.1.1.1/#/login",
data={'username': "lemur", 'password': "adsadsad"},
headers={'content-type': 'application/json'}
)
print response
**Output getting**
> > > print response
> > > Response [405]
You are using the wrong url, what you see in the browser is not the same as the api as its all javascript. I think you want something like: `http://10.1.1.1/api/1/auth/login`.
Hi Mate,
Thanks for the update and that was informative. I am trying to develop a python script, but now the result is showing as Response [400] and the Chrome postmaster plug in giving json format output as
{
"status": 400,
"message": "Bad Request"
}
In Text Format we are getting an output as given below,
{
"message": {
"username": "Missing required parameter in the JSON body"
}
}
Could you please let me know how can we fix this issue?
The updated code was
#!/usr/bin/python
import requests
response = requests.request("POST",
"http://10.1.1.1/api/1/auth/login",
data={'username': "lemur", 'password': "adsadsad"},
headers={'content-type': 'application/json'}
)
print response
The data you send to Lemur needs to be in json format.
try:
```
#!/usr/bin/python
import json
import requests
response = requests.request("POST",
"http://10.1.1.1/api/1/auth/login",
data=json.dumps({'username': "lemur", 'password': "adsadsad"}),
headers={'content-type': 'application/json'}
)
```
Hi kevgliss,
The script is seems to be providing 200 response. As per the documentation (http://lemur.readthedocs.org/en/latest/developer/index.html#post--auth-login) we will be getting a token as output, but while executing this script. Could you please let us know how can we get that?
Hi kevgliss,
Thanks for your guide lines I was able to create script to upload certificate to Lemur. Please find the script below. please go through it and let know if we can improve it some how.
#!/usr/bin/python
import json
import requests
login = requests.request("POST",
"http://10.1.1.1/api/1/auth/login",
data=json.dumps({'username': "lemur", 'password': "admin"}),
headers={'content-type': 'application/json'}
)
login.json()["token"]
print login.json()
Auth = {'Authorization': 'token %s' %login.json()["token"], 'content-type': 'application/json'}
rootcert = open('/home/user/scripts/spk.pem', 'rw')
privatecert = open('/home/user/scripts/private.key', 'rw')
upload = requests.request("POST",
"http://10.1.1.1/api/1/certificates/upload",
data=json.dumps({"owner": "[email protected]", "publicCert": rootcert.read(), "privateKey": privatecert.read(), "destinations": [], "notifications": [], "replacements": [], "name": "Final Test" }),
headers=Auth
)
print upload.json()
Looks good to me! Glad you got it working.
Above script didn't work for me. It threw below error.
{"reasons": {"body": "Missing data for required field."}, "message": "Validation Error."}
I had to use "body" instead of "publicCert" and "chain" instead of "intermediateCert" to make it working.
Also in the official lemur documentation
(http://lemur.readthedocs.io/en/latest/developer/index.html#post--auth-login), the example for "lemur.certificates.views.CertificatesUpload" is completely misleading. | 2018-05-08T16:31:19 |
|
Netflix/lemur | 2,646 | Netflix__lemur-2646 | [
"2636"
] | 721fb8ec70c929fd88ccb837c166a7a572588b47 | diff --git a/lemur/__init__.py b/lemur/__init__.py
--- a/lemur/__init__.py
+++ b/lemur/__init__.py
@@ -62,8 +62,8 @@
)
-def create_app(config=None):
- app = factory.create_app(app_name=__name__, blueprints=LEMUR_BLUEPRINTS, config=config)
+def create_app(config_path=None):
+ app = factory.create_app(app_name=__name__, blueprints=LEMUR_BLUEPRINTS, config=config_path)
configure_hook(app)
return app
diff --git a/lemur/manage.py b/lemur/manage.py
--- a/lemur/manage.py
+++ b/lemur/manage.py
@@ -50,7 +50,7 @@
from lemur.dns_providers.models import DnsProvider # noqa
manager = Manager(create_app)
-manager.add_option('-c', '--config', dest='config')
+manager.add_option('-c', '--config', dest='config_path', required=False)
migrate = Migrate(create_app)
@@ -391,7 +391,7 @@ def run(self, *args, **kwargs):
# run startup tasks on an app like object
validate_conf(current_app, REQUIRED_VARIABLES)
- app.app_uri = 'lemur:create_app(config="{0}")'.format(current_app.config.get('CONFIG_PATH'))
+ app.app_uri = 'lemur:create_app(config_path="{0}")'.format(current_app.config.get('CONFIG_PATH'))
return app.run()
| diff --git a/lemur/tests/conftest.py b/lemur/tests/conftest.py
--- a/lemur/tests/conftest.py
+++ b/lemur/tests/conftest.py
@@ -43,7 +43,7 @@ def app(request):
Creates a new Flask application for a test duration.
Uses application factory `create_app`.
"""
- _app = create_app(os.path.dirname(os.path.realpath(__file__)) + '/conf.py')
+ _app = create_app(config_path=os.path.dirname(os.path.realpath(__file__)) + '/conf.py')
ctx = _app.app_context()
ctx.push()
| Lemur Ignores -c Option
When starting lemur with -c (ex: `lemur start -c /etc/lemur.conf.py`), the value passed into -c is silently ignored.
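A sketch of the wiring the eventual fix aligns (flask-script forwards the parsed option to the app factory as a keyword named after `dest`, so the two names have to match, as in the patch above):

```
from flask_script import Manager

manager = Manager(create_app)  # create_app(config_path=None) after the fix
manager.add_option('-c', '--config', dest='config_path', required=False)
```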
| Looks similar to #1887; have you tried `--config`? Could be just the short name not being parsed.
Just tested with `--config`, same result. The only way that I can get it to use the config file is to have it in `~/.lemur/lemur.cfg.py` (or symlink the config file to that location).
Interestingly, if the file passed to `-c` or `--config` is not readable due to permissions, lemur will die because it failed to read the config file... so at some point it is attempting to at least open the configuration file passed in on the command line. It is not actually using the settings in that file, though, only the one in the user's home directory. | 2019-03-05T23:40:15 |
Netflix/lemur | 3,153 | Netflix__lemur-3153 | [
"3152"
] | c65386a8a8121d0ea96a58e01263ba4adab6f3d5 | diff --git a/lemur/authorities/service.py b/lemur/authorities/service.py
--- a/lemur/authorities/service.py
+++ b/lemur/authorities/service.py
@@ -39,6 +39,22 @@ def update(authority_id, description, owner, active, roles):
return database.update(authority)
+def update_options(authority_id, options):
+ """
+ Update an authority with new options.
+
+ :param authority_id:
+ :param options: the new options to be saved into the authority
+ :return:
+ """
+
+ authority = get(authority_id)
+
+ authority.options = options
+
+ return database.update(authority)
+
+
def mint(**kwargs):
"""
Creates the authority based on the plugin provided.
diff --git a/lemur/plugins/lemur_acme/plugin.py b/lemur/plugins/lemur_acme/plugin.py
--- a/lemur/plugins/lemur_acme/plugin.py
+++ b/lemur/plugins/lemur_acme/plugin.py
@@ -32,6 +32,7 @@
from lemur.plugins import lemur_acme as acme
from lemur.plugins.bases import IssuerPlugin
from lemur.plugins.lemur_acme import cloudflare, dyn, route53, ultradns, powerdns
+from lemur.authorities import service as authorities_service
from retrying import retry
@@ -240,6 +241,7 @@ def setup_acme_client(self, authority):
existing_regr = options.get("acme_regr", current_app.config.get("ACME_REGR"))
if existing_key and existing_regr:
+ current_app.logger.debug("Reusing existing ACME account")
# Reuse the same account for each certificate issuance
key = jose.JWK.json_loads(existing_key)
regr = messages.RegistrationResource.json_loads(existing_regr)
@@ -253,6 +255,7 @@ def setup_acme_client(self, authority):
# Create an account for each certificate issuance
key = jose.JWKRSA(key=generate_private_key("RSA2048"))
+ current_app.logger.debug("Creating a new ACME account")
current_app.logger.debug(
"Connecting with directory at {0}".format(directory_url)
)
@@ -262,6 +265,27 @@ def setup_acme_client(self, authority):
registration = client.new_account_and_tos(
messages.NewRegistration.from_data(email=email)
)
+
+ # if store_account is checked, add the private_key and registration resources to the options
+ if options['store_account']:
+ new_options = json.loads(authority.options)
+ # the key returned by fields_to_partial_json is missing the key type, so we add it manually
+ key_dict = key.fields_to_partial_json()
+ key_dict["kty"] = "RSA"
+ acme_private_key = {
+ "name": "acme_private_key",
+ "value": json.dumps(key_dict)
+ }
+ new_options.append(acme_private_key)
+
+ acme_regr = {
+ "name": "acme_regr",
+ "value": json.dumps({"body": {}, "uri": registration.uri})
+ }
+ new_options.append(acme_regr)
+
+ authorities_service.update_options(authority.id, options=json.dumps(new_options))
+
current_app.logger.debug("Connected: {0}".format(registration.uri))
return client, registration
@@ -447,6 +471,13 @@ class ACMEIssuerPlugin(IssuerPlugin):
"validation": "/^-----BEGIN CERTIFICATE-----/",
"helpMessage": "Certificate to use",
},
+ {
+ "name": "store_account",
+ "type": "bool",
+ "required": False,
+ "helpMessage": "Disable to create a new account for each ACME request",
+ "default": False,
+ }
]
def __init__(self, *args, **kwargs):
| diff --git a/lemur/plugins/lemur_acme/tests/test_acme.py b/lemur/plugins/lemur_acme/tests/test_acme.py
--- a/lemur/plugins/lemur_acme/tests/test_acme.py
+++ b/lemur/plugins/lemur_acme/tests/test_acme.py
@@ -1,8 +1,10 @@
import unittest
from unittest.mock import patch, Mock
+import josepy as jose
from cryptography.x509 import DNSName
from lemur.plugins.lemur_acme import plugin
+from lemur.common.utils import generate_private_key
from mock import MagicMock
@@ -165,11 +167,65 @@ def test_setup_acme_client_fail(self):
with self.assertRaises(Exception):
self.acme.setup_acme_client(mock_authority)
+ @patch("lemur.plugins.lemur_acme.plugin.jose.JWK.json_loads")
@patch("lemur.plugins.lemur_acme.plugin.BackwardsCompatibleClientV2")
@patch("lemur.plugins.lemur_acme.plugin.current_app")
- def test_setup_acme_client_success(self, mock_current_app, mock_acme):
+ def test_setup_acme_client_success_load_account_from_authority(self, mock_current_app, mock_acme, mock_key_json_load):
mock_authority = Mock()
- mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
+ mock_authority.id = 2
+ mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}, ' \
+ '{"name": "store_account", "value": true},' \
+ '{"name": "acme_private_key", "value": "{\\"n\\": \\"PwIOkViO\\", \\"kty\\": \\"RSA\\"}"}, ' \
+ '{"name": "acme_regr", "value": "{\\"body\\": {}, \\"uri\\": \\"http://test.com\\"}"}]'
+ mock_client = Mock()
+ mock_acme.return_value = mock_client
+ mock_current_app.config = {}
+
+ mock_key_json_load.return_value = jose.JWKRSA(key=generate_private_key("RSA2048"))
+
+ result_client, result_registration = self.acme.setup_acme_client(mock_authority)
+
+ mock_acme.new_account_and_tos.assert_not_called()
+ assert result_client
+ assert not result_registration
+
+ @patch("lemur.plugins.lemur_acme.plugin.jose.JWKRSA.fields_to_partial_json")
+ @patch("lemur.plugins.lemur_acme.plugin.authorities_service")
+ @patch("lemur.plugins.lemur_acme.plugin.BackwardsCompatibleClientV2")
+ @patch("lemur.plugins.lemur_acme.plugin.current_app")
+ def test_setup_acme_client_success_store_new_account(self, mock_current_app, mock_acme, mock_authorities_service,
+ mock_key_generation):
+ mock_authority = Mock()
+ mock_authority.id = 2
+ mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}, ' \
+ '{"name": "store_account", "value": true}]'
+ mock_client = Mock()
+ mock_registration = Mock()
+ mock_registration.uri = "http://test.com"
+ mock_client.register = mock_registration
+ mock_client.agree_to_tos = Mock(return_value=True)
+ mock_client.new_account_and_tos.return_value = mock_registration
+ mock_acme.return_value = mock_client
+ mock_current_app.config = {}
+
+ mock_key_generation.return_value = {"n": "PwIOkViO"}
+
+ mock_authorities_service.update_options = Mock(return_value=True)
+
+ self.acme.setup_acme_client(mock_authority)
+
+ mock_authorities_service.update_options.assert_called_with(2, options='[{"name": "mock_name", "value": "mock_value"}, '
+ '{"name": "store_account", "value": true}, '
+ '{"name": "acme_private_key", "value": "{\\"n\\": \\"PwIOkViO\\", \\"kty\\": \\"RSA\\"}"}, '
+ '{"name": "acme_regr", "value": "{\\"body\\": {}, \\"uri\\": \\"http://test.com\\"}"}]')
+
+ @patch("lemur.plugins.lemur_acme.plugin.authorities_service")
+ @patch("lemur.plugins.lemur_acme.plugin.BackwardsCompatibleClientV2")
+ @patch("lemur.plugins.lemur_acme.plugin.current_app")
+ def test_setup_acme_client_success(self, mock_current_app, mock_acme, mock_authorities_service):
+ mock_authority = Mock()
+ mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}, ' \
+ '{"name": "store_account", "value": false}]'
mock_client = Mock()
mock_registration = Mock()
mock_registration.uri = "http://test.com"
@@ -178,6 +234,7 @@ def test_setup_acme_client_success(self, mock_current_app, mock_acme):
mock_acme.return_value = mock_client
mock_current_app.config = {}
result_client, result_registration = self.acme.setup_acme_client(mock_authority)
+ mock_authorities_service.update_options.assert_not_called()
assert result_client
assert result_registration
| ACME plugin should store account credentials per authority
Currently the ACME plugin either reads the account details from the `ACME_PRIVATE_KEY` and `ACME_REGR` configuration variables, or always creates a new account for each request.
This can lead to rate limit exhaustion if you try to create a lot of certificates at once, and it also makes it impossible to revoke an ACME certificate, since the revocation needs to be done using the same account the certificate was issued with.
When creating a client, it tries to read the private key and the registration resource from the authority
https://github.com/Netflix/lemur/blob/772894c414baca916c44ce1f91fb3fe6b33c88f1/lemur/plugins/lemur_acme/plugin.py#L237-L240
But since they aren't stored if a new account was created the first time around, this is futile.
After a new registration is done, it should store the credentials in the authority
https://github.com/Netflix/lemur/blob/772894c414baca916c44ce1f91fb3fe6b33c88f1/lemur/plugins/lemur_acme/plugin.py#L253-L265
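An illustrative sketch of what storing and reloading the account could look like, mirroring the eventual fix (note that the JSON from `fields_to_partial_json` needs `kty` added back before it can be loaded):

```python
import json

import josepy as jose
from acme import messages
from cryptography.hazmat.primitives.asymmetric import rsa

# Serialize a fresh account key and registration the way they would be
# stored in the authority's options...
key = jose.JWKRSA(key=rsa.generate_private_key(public_exponent=65537, key_size=2048))
key_dict = key.fields_to_partial_json()
key_dict["kty"] = "RSA"  # fields_to_partial_json omits the key type

stored = [
    {"name": "acme_private_key", "value": json.dumps(key_dict)},
    {"name": "acme_regr", "value": json.dumps({"body": {}, "uri": "https://acme.example/acct/1"})},
]

# ...and load them back for reuse on the next issuance or for a revocation.
opts = {o["name"]: o["value"] for o in stored}
account_key = jose.JWK.json_loads(opts["acme_private_key"])
registration = messages.RegistrationResource.json_loads(opts["acme_regr"])
```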
| 2020-09-23T14:44:53 |
|
Netflix/lemur | 3,166 | Netflix__lemur-3166 | [
"3165"
] | 12e8e4891cd9454ae92b68d7421bae3071591940 | diff --git a/lemur/dns_providers/schemas.py b/lemur/dns_providers/schemas.py
--- a/lemur/dns_providers/schemas.py
+++ b/lemur/dns_providers/schemas.py
@@ -8,7 +8,7 @@ class DnsProvidersNestedOutputSchema(LemurOutputSchema):
__envelope__ = False
id = fields.Integer()
name = fields.String()
- providerType = fields.String()
+ provider_type = fields.String()
description = fields.String()
credentials = fields.String()
api_endpoint = fields.String()
| DNS Providers list doesn't show type
In the DNS Providers list, there is a column for the provider type, but it's always empty.
Looking at the code and the API requests, the issue seems to be with the dns_providers API call, which returns the list of all providers.
There should be a providerType value in the JSON, but it's not there.
A quick glance at the `DnsProvidersNestedOutputSchema` shows that the field is declared as `providerType`, while in the database it is called `provider_type` (compare `api_endpoint`, which keeps its snake_case name in the output schema). So it's probably just mislabeled in the output schema and needs to be adjusted there, and maybe in the Angular template as well.
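A minimal illustration of why the declared name matters (classes are illustrative; Lemur's output schema additionally camelizes keys on dump, so declaring the snake_case name still yields `providerType` in the response):

```python
from marshmallow import Schema, fields

class DnsProvider:
    provider_type = "route53"

class BrokenSchema(Schema):
    providerType = fields.String()   # no such attribute on the object -> dropped

class FixedSchema(Schema):
    provider_type = fields.String()  # matches the model attribute

print(BrokenSchema().dump(DnsProvider()))  # {}
print(FixedSchema().dump(DnsProvider()))   # {'provider_type': 'route53'}
```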
| 2020-09-29T13:09:23 |
||
Netflix/lemur | 3,264 | Netflix__lemur-3264 | [
"3255"
] | cbdaa4e3e419e7b3f8ae1a262a7c56492e350fbf | diff --git a/lemur/plugins/lemur_acme/acme_handlers.py b/lemur/plugins/lemur_acme/acme_handlers.py
--- a/lemur/plugins/lemur_acme/acme_handlers.py
+++ b/lemur/plugins/lemur_acme/acme_handlers.py
@@ -224,7 +224,7 @@ def get_domains(self, options):
def revoke_certificate(self, certificate):
if not self.reuse_account(certificate.authority):
raise InvalidConfiguration("There is no ACME account saved, unable to revoke the certificate.")
- acme_client, _ = self.acme.setup_acme_client(certificate.authority)
+ acme_client, _ = self.setup_acme_client(certificate.authority)
fullchain_com = jose.ComparableX509(
OpenSSL.crypto.load_certificate(
| ACME Revoke Failed
'AcmeDnsHandler' object has no attribute 'acme':
```
Traceback (most recent call last):
  File "/opt/lemur/lemur/common/schema.py", line 160, in decorated_function
    resp = f(*args, **kwargs)
  File "/opt/lemur/lemur/certificates/views.py", line 1444, in put
    plugin.revoke_certificate(cert, data)
  File "/opt/lemur/lemur/plugins/lemur_acme/plugin.py", line 272, in revoke_certificate
    return self.acme.revoke_certificate(certificate)
  File "/opt/lemur/lemur/plugins/lemur_acme/acme_handlers.py", line 227, in revoke_certificate
    acme_client, _ = self.acme.setup_acme_client(certificate.authority)
AttributeError: 'AcmeDnsHandler' object has no attribute 'acme'
```
Revocation works if the call is changed to:
```
acme_client, _ = self.setup_acme_client(certificate.authority)
```
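A stripped-down sketch of the mix-up (class and method names mirror the traceback): the `acme` attribute lives on the plugin that wraps the handler, not on the handler itself, so inside the handler the method has to be called on `self`:

```python
class AcmeDnsHandler:
    def setup_acme_client(self, authority):
        # body elided; returns (acme_client, registration)
        return object(), None

    def revoke_certificate(self, certificate):
        # Buggy: self.acme.setup_acme_client(...) raises AttributeError,
        # because only the wrapping plugin has `self.acme = AcmeDnsHandler()`.
        acme_client, _ = self.setup_acme_client(certificate.authority)  # fixed
        return acme_client

class ACMEIssuerPlugin:
    def __init__(self):
        self.acme = AcmeDnsHandler()  # `acme` exists here, on the plugin

    def revoke_certificate(self, certificate):
        return self.acme.revoke_certificate(certificate)
```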
| I'll look into this, I have a decent idea what I messed up while refactoring.
I should probably also add tests for the certificate revocation. | 2020-11-23T14:36:18 |
|
Netflix/lemur | 3,379 | Netflix__lemur-3379 | [
"3378"
] | f0fbc8137078554af74dd329bd38298b9ee86a1c | diff --git a/docker/src/lemur.conf.py b/docker/src/lemur.conf.py
--- a/docker/src/lemur.conf.py
+++ b/docker/src/lemur.conf.py
@@ -1,11 +1,18 @@
-import os
+import os.path
import random
import string
+from celery.schedules import crontab
+
import base64
-from ast import literal_eval
_basedir = os.path.abspath(os.path.dirname(__file__))
+# See the Lemur docs (https://lemur.readthedocs.org) for more information on configuration
+
+LOG_LEVEL = str(os.environ.get('LOG_LEVEL', 'DEBUG'))
+LOG_FILE = str(os.environ.get('LOG_FILE', '/home/lemur/.lemur/lemur.log'))
+LOG_JSON = True
+
CORS = os.environ.get("CORS") == "True"
debug = os.environ.get("DEBUG") == "True"
@@ -17,44 +24,214 @@ def get_random_secret(length):
return secret_key + ''.join(random.choice(string.digits) for x in range(round(length / 4)))
+# This is the secret key used by Flask session management
SECRET_KEY = repr(os.environ.get('SECRET_KEY', get_random_secret(32).encode('utf8')))
+# You should consider storing these separately from your config
LEMUR_TOKEN_SECRET = repr(os.environ.get('LEMUR_TOKEN_SECRET',
base64.b64encode(get_random_secret(32).encode('utf8'))))
+# This must match the key for whichever DB the container is using - this could be a dump of dev or test, or a unique key
LEMUR_ENCRYPTION_KEYS = repr(os.environ.get('LEMUR_ENCRYPTION_KEYS',
- base64.b64encode(get_random_secret(32).encode('utf8'))))
-
-LEMUR_ALLOWED_DOMAINS = []
-
-LEMUR_EMAIL = ''
-LEMUR_SECURITY_TEAM_EMAIL = []
-
-ALLOW_CERT_DELETION = os.environ.get('ALLOW_CERT_DELETION') == "True"
-
-LEMUR_DEFAULT_COUNTRY = str(os.environ.get('LEMUR_DEFAULT_COUNTRY',''))
-LEMUR_DEFAULT_STATE = str(os.environ.get('LEMUR_DEFAULT_STATE',''))
-LEMUR_DEFAULT_LOCATION = str(os.environ.get('LEMUR_DEFAULT_LOCATION',''))
-LEMUR_DEFAULT_ORGANIZATION = str(os.environ.get('LEMUR_DEFAULT_ORGANIZATION',''))
-LEMUR_DEFAULT_ORGANIZATIONAL_UNIT = str(os.environ.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT',''))
-
-LEMUR_DEFAULT_ISSUER_PLUGIN = str(os.environ.get('LEMUR_DEFAULT_ISSUER_PLUGIN',''))
-LEMUR_DEFAULT_AUTHORITY = str(os.environ.get('LEMUR_DEFAULT_AUTHORITY',''))
+ base64.b64encode(get_random_secret(32).encode('utf8')).decode('utf8')))
+
+REDIS_HOST = 'redis'
+REDIS_PORT = 6379
+REDIS_DB = 0
+CELERY_RESULT_BACKEND = f'redis://{REDIS_HOST}:{REDIS_PORT}'
+CELERY_BROKER_URL = f'redis://{REDIS_HOST}:{REDIS_PORT}'
+CELERY_IMPORTS = ('lemur.common.celery')
+CELERYBEAT_SCHEDULE = {
+ # All tasks are disabled by default. Enable any tasks you wish to run.
+ # 'fetch_all_pending_acme_certs': {
+ # 'task': 'lemur.common.celery.fetch_all_pending_acme_certs',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(minute="*"),
+ # },
+ # 'remove_old_acme_certs': {
+ # 'task': 'lemur.common.celery.remove_old_acme_certs',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=8, minute=0, day_of_week=5),
+ # },
+ # 'clean_all_sources': {
+ # 'task': 'lemur.common.celery.clean_all_sources',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=5, minute=0, day_of_week=5),
+ # },
+ # 'sync_all_sources': {
+ # 'task': 'lemur.common.celery.sync_all_sources',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour="*/2", minute=0),
+ # # this job is running 30min before endpoints_expire which deletes endpoints which were not updated
+ # },
+ # 'sync_source_destination': {
+ # 'task': 'lemur.common.celery.sync_source_destination',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour="*/2", minute=15),
+ # },
+ # 'report_celery_last_success_metrics': {
+ # 'task': 'lemur.common.celery.report_celery_last_success_metrics',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(minute="*"),
+ # },
+ # 'certificate_reissue': {
+ # 'task': 'lemur.common.celery.certificate_reissue',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=9, minute=0),
+ # },
+ # 'certificate_rotate': {
+ # 'task': 'lemur.common.celery.certificate_rotate',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0),
+ # },
+ # 'endpoints_expire': {
+ # 'task': 'lemur.common.celery.endpoints_expire',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour="*/2", minute=30),
+ # # this job is running 30min after sync_all_sources which updates endpoints
+ # },
+ # 'get_all_zones': {
+ # 'task': 'lemur.common.celery.get_all_zones',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(minute="*/30"),
+ # },
+ # 'check_revoked': {
+ # 'task': 'lemur.common.celery.check_revoked',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0),
+ # }
+ # 'enable_autorotate_for_certs_attached_to_endpoint': {
+ # 'task': 'lemur.common.celery.enable_autorotate_for_certs_attached_to_endpoint',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0),
+ # }
+ # 'notify_expirations': {
+ # 'task': 'lemur.common.celery.notify_expirations',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0),
+ # },
+ # 'notify_authority_expirations': {
+ # 'task': 'lemur.common.celery.notify_authority_expirations',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0),
+ # },
+ # 'send_security_expiration_summary': {
+ # 'task': 'lemur.common.celery.send_security_expiration_summary',
+ # 'options': {
+ # 'expires': 180
+ # },
+ # 'schedule': crontab(hour=10, minute=0, day_of_week='mon-fri'),
+ # }
+}
+CELERY_TIMEZONE = 'UTC'
+
+SQLALCHEMY_ENABLE_FLASK_REPLICATED = False
+SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', 'postgresql://lemur:lemur@localhost:5432/lemur')
+
+SQLALCHEMY_TRACK_MODIFICATIONS = False
+SQLALCHEMY_ECHO = True
+SQLALCHEMY_POOL_RECYCLE = 499
+SQLALCHEMY_POOL_TIMEOUT = 20
+
+LEMUR_EMAIL = '[email protected]'
+LEMUR_SECURITY_TEAM_EMAIL = ['[email protected]']
+LEMUR_SECURITY_TEAM_EMAIL_INTERVALS = [15, 2]
+LEMUR_DEFAULT_EXPIRATION_NOTIFICATION_INTERVALS = [30, 15, 2]
+LEMUR_EMAIL_SENDER = 'smtp'
+
+# mail configuration
+# MAIL_SERVER = 'mail.example.com'
+
+PUBLIC_CA_MAX_VALIDITY_DAYS = 397
+DEFAULT_VALIDITY_DAYS = 365
+
+LEMUR_OWNER_EMAIL_IN_SUBJECT = False
+
+LEMUR_DEFAULT_COUNTRY = str(os.environ.get('LEMUR_DEFAULT_COUNTRY', 'US'))
+LEMUR_DEFAULT_STATE = str(os.environ.get('LEMUR_DEFAULT_STATE', 'California'))
+LEMUR_DEFAULT_LOCATION = str(os.environ.get('LEMUR_DEFAULT_LOCATION', 'Los Gatos'))
+LEMUR_DEFAULT_ORGANIZATION = str(os.environ.get('LEMUR_DEFAULT_ORGANIZATION', 'Example, Inc.'))
+LEMUR_DEFAULT_ORGANIZATIONAL_UNIT = str(os.environ.get('LEMUR_DEFAULT_ORGANIZATIONAL_UNIT', ''))
+
+LEMUR_DEFAULT_AUTHORITY = str(os.environ.get('LEMUR_DEFAULT_AUTHORITY', 'ExampleCa'))
+
+LEMUR_DEFAULT_ROLE = 'operator'
ACTIVE_PROVIDERS = []
-
METRIC_PROVIDERS = []
-LOG_LEVEL = str(os.environ.get('LOG_LEVEL','DEBUG'))
-LOG_FILE = str(os.environ.get('LOG_FILE','/home/lemur/.lemur/lemur.log'))
-
-SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI','postgresql://lemur:lemur@localhost:5432/lemur')
-
-LDAP_DEBUG = os.environ.get('LDAP_DEBUG') == "True"
-LDAP_AUTH = os.environ.get('LDAP_AUTH') == "True"
-LDAP_IS_ACTIVE_DIRECTORY = os.environ.get('LDAP_IS_ACTIVE_DIRECTORY') == "True"
-LDAP_BIND_URI = str(os.environ.get('LDAP_BIND_URI',''))
-LDAP_BASE_DN = str(os.environ.get('LDAP_BASE_DN',''))
-LDAP_EMAIL_DOMAIN = str(os.environ.get('LDAP_EMAIL_DOMAIN',''))
-LDAP_USE_TLS = str(os.environ.get('LDAP_USE_TLS',''))
-LDAP_REQUIRED_GROUP = str(os.environ.get('LDAP_REQUIRED_GROUP',''))
-LDAP_GROUPS_TO_ROLES = literal_eval(os.environ.get('LDAP_GROUPS_TO_ROLES') or "{}")
+# Authority Settings - These will change depending on which authorities you are
+# using
+current_path = os.path.dirname(os.path.realpath(__file__))
+
+# DNS Settings
+
+# exclude logging missing SAN, since we can have certs from private CAs with only cn, prod parity
+LOG_SSL_SUBJ_ALT_NAME_ERRORS = False
+
+ACME_DNS_PROVIDER_TYPES = {"items": [
+ {
+ 'name': 'route53',
+ 'requirements': [
+ {
+ 'name': 'account_id',
+ 'type': 'int',
+ 'required': True,
+ 'helpMessage': 'AWS Account number'
+ },
+ ]
+ },
+ {
+ 'name': 'cloudflare',
+ 'requirements': [
+ {
+ 'name': 'email',
+ 'type': 'str',
+ 'required': True,
+ 'helpMessage': 'Cloudflare Email'
+ },
+ {
+ 'name': 'key',
+ 'type': 'str',
+ 'required': True,
+ 'helpMessage': 'Cloudflare Key'
+ },
+ ]
+ },
+ {
+ 'name': 'dyn',
+ },
+ {
+ 'name': 'ultradns',
+ },
+]}
+
+# Authority plugins which support revocation
+SUPPORTED_REVOCATION_AUTHORITY_PLUGINS = ['acme-issuer']
| failed to build docker, yargs parser supports a minimum Node.js version of 10.
```bash
cd docker && sudo docker-compose up -d --build
```
```
Error: yargs parser supports a minimum Node.js version of 10. Read our version support policy: https://github.com/yargs/yargs-parser#supported-nodejs-versions
at Object.<anonymous> (/opt/lemur/node_modules/yargs-parser/build/index.cjs:991:15)
at Module._compile (module.js:653:30)
at Object.Module._extensions..js (module.js:664:10)
at Module.load (module.js:566:32)
at tryModuleLoad (module.js:506:12)
at Function.Module._load (module.js:498:3)
at Module.require (module.js:597:17)
at require (internal/module.js:11:18)
at Object.<anonymous> (/opt/lemur/node_modules/yargs/build/index.cjs:2855:16)
at Module._compile (module.js:653:30)
at Object.Module._extensions..js (module.js:664:10)
at Module.load (module.js:566:32)
at tryModuleLoad (module.js:506:12)
at Function.Module._load (module.js:498:3)
at Module.require (module.js:597:17)
at require (internal/module.js:11:18)
ERROR: Service 'lemur' failed to build: The command '/bin/sh -c npm install --unsafe-perm && pip3 install -e . && node_modules/.bin/gulp build && node_modules/.bin/gulp package --urlContextPath=${URLCONTEXT} && apk del build-dependencies' returned a non-zero code: 1
```
| 2021-01-26T07:34:33 |
||
Netflix/lemur | 3,726 | Netflix__lemur-3726 | [
"3722"
] | 634f34cc966dff55679308cb87af2120a231e719 | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -564,7 +564,7 @@ def __init__(self):
def post(self):
access_token_url = "https://accounts.google.com/o/oauth2/token"
- people_api_url = "https://www.googleapis.com/plus/v1/people/me/openIdConnect"
+ user_info_url = "https://www.googleapis.com/oauth2/v1/userinfo"
self.reqparse.add_argument("clientId", type=str, required=True, location="json")
self.reqparse.add_argument(
@@ -581,6 +581,7 @@ def post(self):
"redirect_uri": args["redirectUri"],
"code": args["code"],
"client_secret": current_app.config.get("GOOGLE_SECRET"),
+ "scope": "email",
}
r = requests.post(access_token_url, data=payload)
@@ -589,7 +590,7 @@ def post(self):
# Step 2. Retrieve information about the current user
headers = {"Authorization": "Bearer {0}".format(token["access_token"])}
- r = requests.get(people_api_url, headers=headers)
+ r = requests.get(user_info_url, headers=headers)
profile = r.json()
user = user_service.get_by_email(profile["email"])
| Google authentication fails when legacy people API is disabled
When using OAuth credentials generated under a developer project with the [legacy people API](https://developers.google.com/people/legacy) enabled, Lemur will fail to retrieve the user's e-mail address from the user info endpoint:
```
{'error': {'code': 403, 'message': 'Legacy People API has not been used in project 35982982159 before or it is disabled. Enable it by visiting https://console.developers.google.com/apis/api/legacypeople.googleapis.com/overview?project=35982982159 then retry. If you enabled this API recently, wait a few minutes for the action to propagate to our systems and retry.', 'status': 'PERMISSION_DENIED', 'details': [{'@type': 'type.googleapis.com/google.rpc.Help', 'links': [{'description': 'Google developers console API activation', 'url': 'https://console.developers.google.com/apis/api/legacypeople.googleapis.com/overview?project=35982982159'}]}, {'@type': 'type.googleapis.com/google.rpc.ErrorInfo', 'reason': 'SERVICE_DISABLED', 'domain': 'googleapis.com', 'metadata': {'service': 'legacypeople.googleapis.com', 'consumer': 'projects/35982982159'}}]}}
```
It seems that it would need to be updated to use the newer [People API](https://developers.google.com/people/api/rest/v1/people/get) to circumvent this issue.
Unfortunately, it looks like once the legacy People API is disabled it cannot be re-enabled again, so there aren't many available workarounds.
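For reference, the OAuth2 userinfo endpoint that the fix switches to can be exercised directly once an access token is in hand; a minimal sketch (the token value is a placeholder):

```python
import requests

def fetch_google_profile(access_token):
    # OAuth2 userinfo endpoint, replacing the legacy plus/v1 people endpoint.
    r = requests.get(
        "https://www.googleapis.com/oauth2/v1/userinfo",
        headers={"Authorization": "Bearer {0}".format(access_token)},
    )
    r.raise_for_status()
    return r.json()  # includes the "email" claim Lemur needs
```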
| 2021-08-23T20:29:38 |
||
Netflix/lemur | 3,766 | Netflix__lemur-3766 | [
"3765"
] | 4b03baaf5544f167e78055bab15a903b1badf22b | diff --git a/lemur/factory.py b/lemur/factory.py
--- a/lemur/factory.py
+++ b/lemur/factory.py
@@ -15,6 +15,7 @@
import errno
import pkg_resources
import socket
+import stat
from logging import Formatter, StreamHandler
from logging.handlers import RotatingFileHandler
@@ -193,9 +194,14 @@ def configure_logging(app):
:param app:
"""
- handler = RotatingFileHandler(
- app.config.get("LOG_FILE", "lemur.log"), maxBytes=10000000, backupCount=100
- )
+ logfile = app.config.get("LOG_FILE", "lemur.log")
+ # if the log file is a character special device file (ie. stdout/stderr),
+ # file rotation will not work and must be disabled.
+ disable_file_rotation = os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode)
+ if disable_file_rotation:
+ handler = StreamHandler(open(logfile, 'a'))
+ else:
+ handler = RotatingFileHandler(logfile, maxBytes=10000000, backupCount=100)
handler.setFormatter(
Formatter(
| Support emitting logs to stdout
**Context**
We currently deploy Lemur in a containerized environment and would like to have Lemur emit JSON logs to `stdout` instead of in a rotating log file. Our reason is that it is generally regarded as a best practice when running in a container to write all logs to stdout and have the orchestrator do the collection. This is something we really embrace in our company and our tooling is built with this assumption in mind.
**Issue**
We tried configuring Lemur with `LOG_FILE = "/dev/stdout"`, but the `RotatingFileHandler` throws an exception when attempting an `os.seek(...)` on the file descriptor. Unstructured logs are already emitted on `stderr`, but that unfortunately doesn't address our need, since splitting those logs is essentially impossible.
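The fix that landed detects this case; a condensed sketch of the handler selection:

```python
import os
import stat
from logging import StreamHandler
from logging.handlers import RotatingFileHandler

def make_log_handler(logfile):
    # /dev/stdout and /dev/stderr are character special devices; rotation
    # (which seeks and renames files) cannot work on them, so fall back to
    # a plain StreamHandler in that case.
    if os.path.exists(logfile) and stat.S_ISCHR(os.stat(logfile).st_mode):
        return StreamHandler(open(logfile, "a"))
    return RotatingFileHandler(logfile, maxBytes=10000000, backupCount=100)
```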
| 2021-09-23T10:45:03 |
||
Netflix/lemur | 3,922 | Netflix__lemur-3922 | [
"3921"
] | 4e0c04998ad55ce63f35874c59c3661aefe9c56a | diff --git a/lemur/common/celery.py b/lemur/common/celery.py
--- a/lemur/common/celery.py
+++ b/lemur/common/celery.py
@@ -75,6 +75,8 @@ def is_task_active(fun, task_id, args):
i = inspect()
active_tasks = i.active()
+ if active_tasks is None:
+ return False
for _, tasks in active_tasks.items():
for task in tasks:
if task.get("id") == task_id:
| Metric reporting task fails if there are no other active celery tasks
When the `report_celery_last_success_metrics` task is run, it will [check if it's already running](https://github.com/Netflix/lemur/blob/master/lemur/common/celery.py#L139), and if so, skip the task execution. However, if there are no other active tasks being executed by Celery, this check fails and an exception is raised as shown below.
```
{'function': 'lemur.common.celery.report_failed_task', 'Message': 'Celery Task Failure', 'traceback': 'Traceback (most recent call last):
File "/opt/venv/lib/python3.8/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/opt/lemur/lemur/common/celery.py", line 62, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/opt/venv/lib/python3.8/site-packages/celery/app/trace.py", line 650, in __protected_call__
return self.run(*args, **kwargs)
File "/opt/lemur/lemur/common/celery.py", line 140, in report_celery_last_success_metrics
if task_id and is_task_active(function, task_id, None):
File "/opt/lemur/lemur/common/celery.py", line 79, in is_task_active
for _, tasks in active_tasks.items():
AttributeError: \'NoneType\' object has no attribute \'items\'
', 'task_name': 'lemur.common.celery.report_celery_last_success_metrics', 'task_id': '5de894e1-2445-48bd-bfbf-50692e302c51', 'sender_hostname': 'gen1@lemur-ffd76664-s4jd4', 'receiver_hostname': 'celery@lemur-ffd76664-s4jd4', 'error': 'AttributeError("\'NoneType\' object has no attribute \'items\'")'}
```
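A standalone sketch of the guarded check (the inspector argument stands in for Celery's `app.control.inspect()`; `active()` returns `None` when no workers report active tasks, which is what crashed the `.items()` loop):

```python
def is_task_active(inspector, task_id):
    active_tasks = inspector.active()
    if active_tasks is None:  # no workers responded / nothing active
        return False
    for _worker, tasks in active_tasks.items():
        if any(task.get("id") == task_id for task in tasks):
            return True
    return False
```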
| 2022-01-04T14:21:07 |
||
Netflix/lemur | 4,581 | Netflix__lemur-4581 | [
"4195"
] | 307f3996ab7828beb773151df574fc83de10fb46 | diff --git a/lemur/plugins/base/v1.py b/lemur/plugins/base/v1.py
--- a/lemur/plugins/base/v1.py
+++ b/lemur/plugins/base/v1.py
@@ -6,10 +6,11 @@
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-from flask import current_app
+import re
from threading import local
from typing import Optional, Dict, List, Any
-import re
+
+from flask import current_app
# stolen from https://github.com/getsentry/sentry/
@@ -133,7 +134,7 @@ def validate_option_value(self, option_name, value):
validation = class_opt.get("validation")
if not validation:
return value
- if not re.match(validation, value):
+ if (callable(validation) and not validation(value)) or not re.match(validation, value):
raise ValueError(f"Option '{option_name}' cannot be validated")
elif opt_type == "select":
available = class_opt.get("available")
diff --git a/lemur/plugins/lemur_acme/route53.py b/lemur/plugins/lemur_acme/route53.py
--- a/lemur/plugins/lemur_acme/route53.py
+++ b/lemur/plugins/lemur_acme/route53.py
@@ -16,24 +16,27 @@ def wait_for_dns_change(change_id, client=None):
@sts_client("route53")
def find_zone_id(domain, client=None):
+ return _find_zone_id(domain, client)
+
+
+def _find_zone_id(domain, client=None):
paginator = client.get_paginator("list_hosted_zones")
- zones = []
- match_length = 0
+ min_diff_length = float("inf")
+ chosen_zone = None
+
for page in paginator.paginate():
for zone in page["HostedZones"]:
if domain.endswith(zone["Name"]) or (domain + ".").endswith(zone["Name"]):
if not zone["Config"]["PrivateZone"]:
- if len(zone["Name"]) > match_length:
- # reset the list, as we have found a longer match
- zones = [(zone["Name"], zone["Id"])]
- match_length = len(zone["Name"])
- elif len(zone["Name"]) == match_length:
- # add all equal length zones, though only the first one will be returned
- zones.append((zone["Name"], zone["Id"]))
-
- if not zones:
+ diff_length = len(domain) - len(zone["Name"])
+ if diff_length < min_diff_length:
+ min_diff_length = diff_length
+ chosen_zone = (zone["Name"], zone["Id"])
+
+ if chosen_zone is None:
raise ValueError("Unable to find a Route53 hosted zone for {}".format(domain))
- return zones[0][1]
+
+ return chosen_zone[1] # Return the chosen zone ID
@sts_client("route53")
diff --git a/lemur/plugins/lemur_vault_dest/plugin.py b/lemur/plugins/lemur_vault_dest/plugin.py
--- a/lemur/plugins/lemur_vault_dest/plugin.py
+++ b/lemur/plugins/lemur_vault_dest/plugin.py
@@ -11,19 +11,18 @@
"""
import os
import re
+
import hvac
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
from flask import current_app
+from validators.url import url
from lemur.common.defaults import common_name, country, state, location, organizational_unit, organization
from lemur.common.utils import parse_certificate, check_validation
from lemur.plugins.bases import DestinationPlugin
from lemur.plugins.bases import SourcePlugin
-from cryptography import x509
-from cryptography.hazmat.backends import default_backend
-
-from validators.url import regex as url_regex
-
class VaultSourcePlugin(SourcePlugin):
""" Class for importing certificates from Hashicorp Vault"""
@@ -40,7 +39,7 @@ class VaultSourcePlugin(SourcePlugin):
"name": "vaultUrl",
"type": "str",
"required": True,
- "validation": url_regex.pattern,
+ "validation": url,
"helpMessage": "Valid URL to Hashi Vault instance",
},
{
@@ -167,7 +166,7 @@ class VaultDestinationPlugin(DestinationPlugin):
"name": "vaultUrl",
"type": "str",
"required": True,
- "validation": url_regex.pattern,
+ "validation": url,
"helpMessage": "Valid URL to Hashi Vault instance",
},
{
| diff --git a/lemur/plugins/lemur_acme/tests/test_route53.py b/lemur/plugins/lemur_acme/tests/test_route53.py
new file mode 100644
--- /dev/null
+++ b/lemur/plugins/lemur_acme/tests/test_route53.py
@@ -0,0 +1,31 @@
+from unittest.mock import MagicMock
+
+import pytest
+
+from lemur.plugins.lemur_acme.route53 import _find_zone_id
+from lemur.tests.conftest import app # noqa
+
+
+def test_zone_selection(app): # noqa
+ # Mocking AWS client
+ client = MagicMock()
+
+ zones = [
+ {"Config": {"PrivateZone": False}, "Name": "acme.identity.uq.edu.au.", "Id": "Z1"},
+ {"Config": {"PrivateZone": False}, "Name": "dev.acme.identity.uq.edu.au.", "Id": "Z2"},
+ {"Config": {"PrivateZone": True}, "Name": "test.dev.acme.identity.uq.edu.au.", "Id": "Z3"}
+ ]
+
+ # Mocking the paginator
+ paginator = MagicMock()
+ paginator.paginate.return_value = [{"HostedZones": zones}]
+ client.get_paginator.return_value = paginator
+
+ # Replace this with reference to your function
+ assert _find_zone_id("test.dev.acme.identity.uq.edu.au", client) == "Z2"
+ assert _find_zone_id("another.dev.acme.identity.uq.edu.au", client) == "Z2"
+ assert _find_zone_id("test2.acme.identity.uq.edu.au", client) == "Z1"
+
+ # Test that it raises a ValueError for a domain where no matching zone is found
+ with pytest.raises(ValueError):
+ _find_zone_id("test3.some.other.domain", client)
| Choice of Route53 DNS
https://github.com/Netflix/lemur/blob/8b60f10dd63065f4da6eaf9e338e4b3dee50fda4/lemur/plugins/lemur_acme/route53.py#L29
The zone should be chosen by minimising the difference between the requested DNS name and the Route53 zone suffix, i.e. the zone with the longest matching suffix should win.
Example: two Route53 zones.
Cert requested for (A) test.dev.acme.identity.uq.edu.au
Zones:
(B) acme.identity.uq.edu.au
(C) dev.acme.identity.uq.edu.au
Txt record needed at _acme-validation.test.dev.acme.identity.uq.edu.au
len(A) - len(B) = 9
len(A) - len(C) = 5
Choose zone C, the smaller difference (longest matching suffix).
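A standalone sketch of that selection rule (a pure function; zone names carry Route53's trailing dot):

```python
def choose_zone_id(domain, zones):
    """Pick the hosted zone with the longest matching suffix.

    zones: iterable of (name, zone_id) pairs, e.g. ("dev.acme.example.", "Z2").
    """
    best_id, best_diff = None, float("inf")
    for name, zone_id in zones:
        if domain.endswith(name) or (domain + ".").endswith(name):
            diff = len(domain) - len(name)  # smaller diff = longer suffix match
            if diff < best_diff:
                best_id, best_diff = zone_id, diff
    if best_id is None:
        raise ValueError("No hosted zone found for {0}".format(domain))
    return best_id

# choose_zone_id("test.dev.acme.identity.uq.edu.au",
#                [("acme.identity.uq.edu.au.", "Z1"),
#                 ("dev.acme.identity.uq.edu.au.", "Z2")])  # -> "Z2"
```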
| 2023-09-06T20:37:28 |
|
Netflix/lemur | 4,594 | Netflix__lemur-4594 | [
"4543"
] | 40b54aa23c3fd32eaa69448d37de337cb6ebb557 | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -15,6 +15,8 @@
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac
from flask import Blueprint, current_app
+from flask_limiter import Limiter
+from flask_limiter.util import get_remote_address
from flask_principal import Identity, identity_changed
from flask_restful import reqparse, Resource, Api
@@ -31,6 +33,7 @@
mod = Blueprint("auth", __name__)
api = Api(mod)
+limiter = Limiter(app=current_app, key_func=get_remote_address)
def exchange_for_access_token(
@@ -351,6 +354,7 @@ def __init__(self):
self.reqparse = reqparse.RequestParser()
super(Login, self).__init__()
+ @limiter.limit("10/5minute")
def post(self):
"""
.. http:post:: /auth/login
diff --git a/lemur/common/schema.py b/lemur/common/schema.py
--- a/lemur/common/schema.py
+++ b/lemur/common/schema.py
@@ -8,12 +8,12 @@
"""
from functools import wraps
-from flask import request, current_app
-from sentry_sdk import capture_exception
-from sqlalchemy.orm.collections import InstrumentedList
+from flask import request, current_app
from inflection import camelize, underscore
from marshmallow import Schema, post_dump, pre_load
+from sentry_sdk import capture_exception
+from sqlalchemy.orm.collections import InstrumentedList
class LemurSchema(Schema):
diff --git a/lemur/factory.py b/lemur/factory.py
--- a/lemur/factory.py
+++ b/lemur/factory.py
@@ -9,32 +9,29 @@
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-import os
-import importlib
-import logmatic
import errno
-import pkg_resources
+import importlib
+import os
import socket
import stat
-
from logging import Formatter, StreamHandler
from logging.handlers import RotatingFileHandler
+import logmatic
+import pkg_resources
+import sentry_sdk
+from click import get_current_context
from flask import Flask, current_app
from flask_replicated import FlaskReplicated
-from click import get_current_context
-
-import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
+from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
-from sentry_sdk.integrations.flask import FlaskIntegration
from lemur.certificates.hooks import activate_debug_dump
from lemur.common.health import mod as health
from lemur.extensions import db, migrate, principal, smtp_mail, metrics, cors
-
DEFAULT_BLUEPRINTS = (health,)
API_VERSION = 1
| No rate limiting for login
Currently there is no rate limiting mechanism for the login endpoint. An attacker could potentially take advantage of this functionality to overwhelm the system and try to gain access to user accounts.
References
[OWASP's Page on Blocking Brute-Force Attacks](https://owasp.org/www-community/controls/Blocking_Brute_Force_Attacks)
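One common mitigation is per-client throttling; a minimal sketch with Flask-Limiter (the route, limit string, and key function are illustrative):

```python
from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)
limiter = Limiter(app=app, key_func=get_remote_address)

@app.route("/auth/login", methods=["POST"])
@limiter.limit("10/5minute")  # at most 10 attempts per 5 minutes per client IP
def login():
    return "ok"
```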
---
The referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)
| 2023-09-11T21:02:43 |
||
Netflix/lemur | 4,595 | Netflix__lemur-4595 | [
"4541"
] | bbe73e58f98b031fffc04ee3af29b0ba5bb6dddd | diff --git a/lemur/auth/service.py b/lemur/auth/service.py
--- a/lemur/auth/service.py
+++ b/lemur/auth/service.py
@@ -8,27 +8,23 @@
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-import jwt
import json
-import binascii
-
-from functools import wraps
from datetime import datetime, timedelta
+from functools import wraps
-from flask import g, current_app, jsonify, request
-
-from flask_restful import Resource
-from flask_principal import identity_loaded, RoleNeed, UserNeed
-
-from flask_principal import Identity, identity_changed
-
+import binascii
+import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
+from flask import g, current_app, jsonify, request
+from flask_principal import Identity, identity_changed
+from flask_principal import identity_loaded, RoleNeed, UserNeed
+from flask_restful import Resource
-from lemur.users import service as user_service
from lemur.api_keys import service as api_key_service
from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed
+from lemur.users import service as user_service
def get_rsa_public_key(n, e):
@@ -57,9 +53,21 @@ def create_token(user, aid=None, ttl=None):
:param user:
:return:
"""
- expiration_delta = timedelta(
- days=int(current_app.config.get("LEMUR_TOKEN_EXPIRATION", 1))
- )
+ expiration_delta = timedelta(days=1)
+ custom_expiry = current_app.config.get("LEMUR_TOKEN_EXPIRATION")
+ if custom_expiry:
+ if isinstance(custom_expiry, str) and custom_expiry.endswith("m"):
+ expiration_delta = timedelta(
+ minutes=int(custom_expiry.rstrip("m"))
+ )
+ elif isinstance(custom_expiry, str) and custom_expiry.endswith("h"):
+ expiration_delta = timedelta(
+ hours=int(custom_expiry.rstrip("h"))
+ )
+ else:
+ expiration_delta = timedelta(
+ days=int(custom_expiry)
+ )
payload = {"iat": datetime.utcnow(), "exp": datetime.utcnow() + expiration_delta}
# Handle Just a User ID & User Object.
| Session timeout can be reduced
Currently the JWT session token has an excessive session timeout. The length of the timeout can be reduced to lower the risk of an attacker gaining access.
If a user leaves their computer unattended, a nearby attacker could access the user’s computer and any open applications. Automatically logging a user out after an extended period of inactivity can limit the time that an attacker could make use of any hijacked sessions.
References:
[OWASP's Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html)
[OWASP's Page on Session Timeout](https://owasp.org/www-community/Session_Timeout)
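One way to make the expiry configurable, mirroring the suffix convention the fix above introduces for `LEMUR_TOKEN_EXPIRATION`:

```python
from datetime import timedelta

def parse_token_expiry(value):
    """Days by default; '30m' / '12h' style suffixes allow shorter sessions."""
    if isinstance(value, str) and value.endswith("m"):
        return timedelta(minutes=int(value.rstrip("m")))
    if isinstance(value, str) and value.endswith("h"):
        return timedelta(hours=int(value.rstrip("h")))
    return timedelta(days=int(value))

assert parse_token_expiry("30m") == timedelta(minutes=30)
assert parse_token_expiry("12h") == timedelta(hours=12)
assert parse_token_expiry(1) == timedelta(days=1)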
---
The referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)
| 2023-09-11T21:24:08 |
||
Netflix/lemur | 4,596 | Netflix__lemur-4596 | [
"4542"
] | d2f32579825a4a2b9ef235c12775fee8bca7e432 | diff --git a/lemur/auth/views.py b/lemur/auth/views.py
--- a/lemur/auth/views.py
+++ b/lemur/auth/views.py
@@ -6,31 +6,28 @@
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-import jwt
import base64
-import requests
import time
+import jwt
+import requests
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, hmac
-
from flask import Blueprint, current_app
-
-from flask_restful import reqparse, Resource, Api
from flask_principal import Identity, identity_changed
+from flask_restful import reqparse, Resource, Api
+from lemur.auth import ldap
+from lemur.auth.service import create_token, fetch_token_header, get_rsa_public_key
+from lemur.common.utils import get_psuedo_random_string, get_state_token_secret
from lemur.constants import SUCCESS_METRIC_STATUS, FAILURE_METRIC_STATUS
from lemur.exceptions import TokenExchangeFailed
from lemur.extensions import metrics
-from lemur.common.utils import get_psuedo_random_string, get_state_token_secret
-
-from lemur.users import service as user_service
-from lemur.roles import service as role_service
from lemur.logs import service as log_service
-from lemur.auth.service import create_token, fetch_token_header, get_rsa_public_key
-from lemur.auth import ldap
from lemur.plugins.base import plugins
+from lemur.roles import service as role_service
+from lemur.users import service as user_service
mod = Blueprint("auth", __name__)
api = Api(mod)
diff --git a/lemur/users/schemas.py b/lemur/users/schemas.py
--- a/lemur/users/schemas.py
+++ b/lemur/users/schemas.py
@@ -5,7 +5,10 @@
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-from marshmallow import fields
+import re
+
+from flask import current_app
+from marshmallow import fields, validates, ValidationError
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.schemas import (
@@ -19,13 +22,34 @@ class UserInputSchema(LemurInputSchema):
id = fields.Integer()
username = fields.String(required=True)
email = fields.Email(required=True)
- password = fields.String() # TODO add complexity requirements
+ password = fields.String()
active = fields.Boolean()
roles = fields.Nested(AssociatedRoleSchema, many=True, missing=[])
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
authorities = fields.Nested(AssociatedAuthoritySchema, many=True, missing=[])
+class UserCreateInputSchema(UserInputSchema):
+ @validates('password')
+ def validate_password(self, value):
+ if current_app.config.get('CHECK_PASSWORD_STRENGTH', True):
+ # At least 12 characters
+ if len(value) < 12:
+ raise ValidationError('Password must be at least 12 characters long.')
+
+ # A mixture of both uppercase and lowercase letters
+ if not any(map(str.isupper, value)) or not any(map(str.islower, value)):
+ raise ValidationError('Password must contain both uppercase and lowercase characters.')
+
+ # A mixture of letters and numbers
+ if not any(map(str.isdigit, value)):
+ raise ValidationError('Password must contain at least one digit.')
+
+ # Inclusion of at least one special character
+ if not re.findall(r'[!@#?\]]', value):
+ raise ValidationError('Password must contain at least one special character (!@#?]).')
+
+
class UserOutputSchema(LemurOutputSchema):
id = fields.Integer()
username = fields.String()
@@ -36,6 +60,7 @@ class UserOutputSchema(LemurOutputSchema):
user_input_schema = UserInputSchema()
+user_create_input_schema = UserCreateInputSchema()
user_output_schema = UserOutputSchema()
users_output_schema = UserOutputSchema(many=True)
diff --git a/lemur/users/views.py b/lemur/users/views.py
--- a/lemur/users/views.py
+++ b/lemur/users/views.py
@@ -18,7 +18,7 @@
from lemur.users.schemas import (
user_input_schema,
user_output_schema,
- users_output_schema,
+ users_output_schema, user_create_input_schema,
)
mod = Blueprint("users", __name__)
@@ -89,7 +89,7 @@ def get(self):
args = parser.parse_args()
return service.render(args)
- @validate_schema(user_input_schema, user_output_schema)
+ @validate_schema(user_create_input_schema, user_output_schema)
@admin_permission.require(http_exception=403)
def post(self, data=None):
"""
| diff --git a/lemur/tests/test_users.py b/lemur/tests/test_users.py
--- a/lemur/tests/test_users.py
+++ b/lemur/tests/test_users.py
@@ -1,8 +1,10 @@
import json
import pytest
+from marshmallow import ValidationError
from lemur.tests.factories import UserFactory, RoleFactory
+from lemur.users.schemas import UserInputSchema, UserCreateInputSchema
from lemur.users.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
@@ -12,8 +14,6 @@
def test_user_input_schema(client):
- from lemur.users.schemas import UserInputSchema
-
input_data = {
"username": "example",
"password": "1233432",
@@ -25,6 +25,30 @@ def test_user_input_schema(client):
assert not errors
+def test_valid_password():
+ schema = UserCreateInputSchema()
+ good_password = "ABcdefg123456@#]"
+ # This password should not raise an exception
+ schema.validate_password(good_password)
+
+
[email protected](
+ "bad_password",
+ [
+ "ABCD1234!#]", # No lowercase
+ "abcd1234@#]", # No uppercase
+ "!@#]Abcdefg", # No digit
+ "ABCDabcd1234", # No special character
+ "Ab1!@#]", # less than 12 characters
+ ],
+)
+def test_invalid_password(bad_password):
+ schema = UserCreateInputSchema()
+ # All these passwords should raise an exception
+ with pytest.raises(ValidationError):
+ schema.validate_password(bad_password)
+
+
@pytest.mark.parametrize(
"token,status",
[
| Weak password policy for user accounts
Currently there is a weak password policy implemented when creating user accounts. The password policy could be strengthened to include the following (a validation sketch follows the list):
- At least 12 characters; the more characters, the better
- A mixture of both uppercase and lowercase letters
- A mixture of letters and numbers
- Inclusion of at least one special character, e.g., ! @ # ? ]
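A sketch of a validator enforcing exactly these rules (the special-character set mirrors the examples in the list):

```python
import re

def validate_password(value):
    if len(value) < 12:
        raise ValueError("Password must be at least 12 characters long.")
    if not any(c.isupper() for c in value) or not any(c.islower() for c in value):
        raise ValueError("Password must mix uppercase and lowercase letters.")
    if not any(c.isdigit() for c in value):
        raise ValueError("Password must contain at least one digit.")
    if not re.search(r"[!@#?\]]", value):
        raise ValueError("Password must contain a special character (!@#?]).")

validate_password("ABcdefg123456@#]")  # passes; weaker inputs raise ValueError
```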
---
The referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)
| 2023-09-11T22:01:26 |
|
Netflix/lemur | 4,597 | Netflix__lemur-4597 | [
"3888"
] | 13a0ca9f47c4df0738f0bd6f685ca31ea2b39215 | diff --git a/lemur/users/service.py b/lemur/users/service.py
--- a/lemur/users/service.py
+++ b/lemur/users/service.py
@@ -13,7 +13,6 @@
from lemur.logs import service as log_service
from lemur.users.models import User
-
STRICT_ENFORCEMENT_DEFAULT_ROLES = ["admin", "operator", "read-only"]
@@ -46,7 +45,7 @@ def create(username, password, email, active, profile_picture, roles):
return database.create(user)
-def update(user_id, username, email, active, profile_picture, roles):
+def update(user_id, username, email, active, profile_picture, roles, password=None):
"""
Updates an existing user
@@ -56,6 +55,7 @@ def update(user_id, username, email, active, profile_picture, roles):
:param active:
:param profile_picture:
:param roles:
+ :param password:
:return:
"""
strict_role_enforcement = current_app.config.get("LEMUR_STRICT_ROLE_ENFORCEMENT", False)
@@ -68,6 +68,8 @@ def update(user_id, username, email, active, profile_picture, roles):
user.email = email
user.active = active
user.profile_picture = profile_picture
+ if password:
+ user.password = password
update_roles(user, roles)
log_service.audit_log("update_user", username, f"Updating user with id {user_id}")
diff --git a/lemur/users/views.py b/lemur/users/views.py
--- a/lemur/users/views.py
+++ b/lemur/users/views.py
@@ -8,23 +8,19 @@
from flask import g, Blueprint
from flask_restful import reqparse, Api
-from lemur.common.schema import validate_schema
-from lemur.common.utils import paginated_parser
-
-from lemur.auth.service import AuthenticatedResource
from lemur.auth.permissions import admin_permission
-
-from lemur.users import service
+from lemur.auth.service import AuthenticatedResource
from lemur.certificates import service as certificate_service
+from lemur.common.schema import validate_schema
+from lemur.common.utils import paginated_parser
from lemur.roles import service as role_service
-
+from lemur.users import service
from lemur.users.schemas import (
user_input_schema,
user_output_schema,
users_output_schema,
)
-
mod = Blueprint("users", __name__)
api = Api(mod)
@@ -282,6 +278,7 @@ def put(self, user_id, data=None):
data["active"],
None,
data["roles"],
+ data.get("password")
)
| Missing option to change user password
It should be possible to change the user password after the user has been created.
I noticed that changing the `LEMUR_ADMIN_PASSWORD` after the database had been initialised did not change the lemur user password.
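With the service-layer change above, a password reset could be scripted from a Flask shell roughly like this (the lookup helper name and the role shape are assumptions, not verified API):

```python
from lemur.users import service as user_service

user = user_service.get_by_username("lemur")  # assumed helper name
user_service.update(
    user.id,
    user.username,
    user.email,
    user.active,
    None,                                # profile_picture
    [{"id": role.id} for role in user.roles],
    password="a-New-Str0ng-Passw0rd!",   # assumed: model layer hashes on assignment
)
```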
| 2023-09-11T22:04:25 |
||
doccano/doccano | 163 | doccano__doccano-163 | [
"162"
] | 781f94333e122b4f0aeca7970e19d3a1d20a84bd | diff --git a/app/server/serializers.py b/app/server/serializers.py
--- a/app/server/serializers.py
+++ b/app/server/serializers.py
@@ -34,8 +34,11 @@ def validate(self, attrs):
raise ValidationError('Shortcut key may not have a suffix key.')
# Don't allow to save same shortcut key when prefix_key is null.
+ context = self.context['request'].parser_context
+ project_id = context['kwargs'].get('project_id')
if Label.objects.filter(suffix_key=suffix_key,
- prefix_key__isnull=True).exists():
+ prefix_key__isnull=True,
+ project=project_id).exists():
raise ValidationError('Duplicate key.')
return super().validate(attrs)
| diff --git a/app/server/tests/test_api.py b/app/server/tests/test_api.py
--- a/app/server/tests/test_api.py
+++ b/app/server/tests/test_api.py
@@ -152,8 +152,10 @@ def setUpTestData(cls):
cls.main_project_label = mommy.make('server.Label', project=cls.main_project)
sub_project = mommy.make('server.Project', users=[non_project_member])
+ other_project = mommy.make('server.Project', users=[super_user])
mommy.make('server.Label', project=sub_project)
cls.url = reverse(viewname='label_list', args=[cls.main_project.id])
+ cls.other_url = reverse(viewname='label_list', args=[other_project.id])
cls.data = {'text': 'example'}
def test_returns_labels_to_project_member(self):
@@ -194,6 +196,15 @@ def test_can_create_multiple_labels_without_shortcut_key(self):
response = self.client.post(self.url, format='json', data=label)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ def test_can_create_same_label_in_multiple_projects(self):
+ self.client.login(username=self.super_user_name,
+ password=self.super_user_pass)
+ label = {'text': 'LOC', 'prefix_key': None, 'suffix_key': 'l'}
+ response = self.client.post(self.url, format='json', data=label)
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ response = self.client.post(self.other_url, format='json', data=label)
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+
def test_disallows_project_member_to_create_label(self):
self.client.login(username=self.project_member_name,
password=self.project_member_pass)
| Cannot use the same shortcut and label name in different projects
The issue is the following:
1. I create Project A.
2. Import some annotated data to the project - a set of labels is automatically created as a result (say, Label 1, Label 2, Label 3).
3. I edit these labels - provide a shortcut and color.
4. I create Project B and import another portion of the dataset (with the same set of labels).
5. I try to replicate the settings of Project A (assign the same shortcuts, colors and label names to labels), which gives me an error:
`You cannot use same label name or shortcut key.`
It seems inconvenient that we cannot create labels with the same shortcut and label name in different projects. I wonder whether this was intentional or it is a bug.
Thank you very much for a great tool.
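The eventual fix scopes the duplicate-shortcut check to the current project; a condensed sketch of that query (the model import path follows doccano's `server` app layout):

```python
def shortcut_taken(project_id, suffix_key):
    # Only reject a duplicate when it exists within the *same* project.
    from server.models import Label
    return Label.objects.filter(
        suffix_key=suffix_key,
        prefix_key__isnull=True,
        project=project_id,
    ).exists()
```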
| Thanks! I will check the problem. | 2019-04-16T06:44:04 |
doccano/doccano | 363 | doccano__doccano-363 | [
"309"
] | 8d66e69c29d79183433b85b1496b580229489eba | diff --git a/app/authentification/views.py b/app/authentification/views.py
--- a/app/authentification/views.py
+++ b/app/authentification/views.py
@@ -9,7 +9,7 @@
from django.views.generic import TemplateView
from django.shortcuts import redirect
-from app import settings
+from django.conf import settings
class SignupView(TemplateView):
| diff --git a/app/authentification/tests/test_template.py b/app/authentification/tests/test_template.py
--- a/app/authentification/tests/test_template.py
+++ b/app/authentification/tests/test_template.py
@@ -1,7 +1,7 @@
from django.test import SimpleTestCase, TestCase, RequestFactory, override_settings
from django.http import HttpRequest
from ..views import SignupView
-from app import settings
+from django.conf import settings
from api.tests.test_config import setenv
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
| New user signup page question
Hi, I'm trying to understand the user structure. I see a few posts about only being able to assign users to specific projects through the Django admin screen, but my question is about the 'sign up' page you are offered when you click login: is this totally non-functional? That is, is the *only* way to make new users of any kind through the Django admin page?
Thanks,
Z
| I'd like to ask the same question
If you have a look at the `docker-compose.yml` file, you may have what you want: at line 15, change "False" to "True" and it should be ok
``` bash
version: "3"
services:
django:
image: python:3.6
volumes:
- .:/src
- venv:/src/venv
command: ["/src/tools/dev-django.sh", "0.0.0.0:8000"]
environment:
ADMIN_USERNAME: "admin"
ADMIN_PASSWORD: "password"
ADMIN_EMAIL: "[email protected]"
DATABASE_URL: "postgres://doccano:doccano@postgres:5432/doccano?sslmode=disable"
ALLOW_SIGNUP: "False"
ports:
- 8000:8000
```
@guillim Hello, I changed the fifteenth line of the `docker-compose.yml` file to "true" and set up the mail settings at the bottom of `settings.py`, but when registering users I am still prompted with "The webmaster has not set up any emails yet, so we can't send any link confirmation. Contact the admin to learn more." What's the reason?
Does it work when you set up the mailing box as "localhost" by simply uncommenting
```EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'```
from this part of settings.py ?
```
## necessary for email verification setup
# EMAIL_USE_TLS = True
# EMAIL_HOST = 'smtp.gmail.com'
# EMAIL_HOST_USER = '[email protected]'
# EMAIL_HOST_PASSWORD = 'gfds6jk#4ljIr%G8%'
# EMAIL_PORT = 587
#
## During development only
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
```
@guillim Hi,
I did that, but registration still shows the prompt
`The webmaster has not set up any emails yet, so
we can't send you any link confirmation
Please contact the admin to learn more`
How do I configure files to use the user registration function?
You should see in your logs a message like this one:
```
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: Activate your account.
From: webmaster@localhost
To: [email protected]
Date: Wed, 31 Jul 2019 08:58:26 -0000
Message-ID: <156456350669.28.14006987809080586397@6c92c4a3631f>
Hi user2,
Please click on the link to confirm your email,
http://localhost:8000/activate/NA/58h-5a294df47d15b63332b1
```
Simply follow the link to confirm your email and it should be good. Note that when this is done, you also need to add the user to the projects that you created as an admin (go to the [admin](http://localhost:8000/admin) page to do this)
@guillim
Hi, thank you for your help. I want to know where the log is.
It depends on whether you use **docker**, **docker-compose**, or launch it directly. According to your earlier messages, I guess you use **docker-compose**. In that case, the log should be on the same terminal where you typed
`docker-compose up`
@guillim The way I installed it is `Option 1: Pull the production Docker image`
You probably want to read things such as [docker logging](https://docs.docker.com/config/containers/logging/).
On my computer it would be something like this:
`docker logs doccano_test_django_1` | 2019-08-30T14:41:37 |
doccano/doccano | 603 | doccano__doccano-603 | [
"599"
] | c364e5685ed9a17e2e604607802a656c1ce4e798 | diff --git a/app/api/models.py b/app/api/models.py
--- a/app/api/models.py
+++ b/app/api/models.py
@@ -150,7 +150,7 @@ class Label(models.Model):
('ctrl shift', 'ctrl shift')
)
SUFFIX_KEYS = tuple(
- (c, c) for c in string.ascii_lowercase
+ (c, c) for c in string.digits + string.ascii_lowercase
)
text = models.CharField(max_length=100)
| [Tiny enhancement request] Allow digit keys as shortkeys
Feature description
---------
Currently, only English letters are allowed as shortcut keys for annotation.
Proposal: allow both English letters and digits as shortcut keys.
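The change amounts to widening the generated choices; for illustration:

```python
import string

SUFFIX_KEYS = tuple((c, c) for c in string.digits + string.ascii_lowercase)
# (('0', '0'), ..., ('9', '9'), ('a', 'a'), ..., ('z', 'z'))
```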
| OK. | 2020-03-10T11:37:35 |
|
doccano/doccano | 607 | doccano__doccano-607 | [
"589"
] | 37093680865a9f5f734986b2804bca1292563a89 | diff --git a/app/server/management/commands/create_roles.py b/app/server/management/commands/create_roles.py
--- a/app/server/management/commands/create_roles.py
+++ b/app/server/management/commands/create_roles.py
@@ -13,11 +13,13 @@ def handle(self, *args, **options):
except KeyError as key_error:
self.stderr.write(self.style.ERROR(f'Missing Key: "{key_error}"'))
for role_name in role_names:
+ if Role.objects.filter(name=role_name).exists():
+ continue
role = Role()
role.name = role_name
try:
role.save()
except DatabaseError as db_error:
- self.stderr.write(self.style.ERROR(f'Datbase Error: "{db_error}"'))
+ self.stderr.write(self.style.ERROR(f'Database Error: "{db_error}"'))
else:
self.stdout.write(self.style.SUCCESS(f'Role created successfully "{role_name}"'))
| [Enhancement request] Avoid duplicate key value error on launching
Enhancement description
---------
I see these errors in the log on each start:
```
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] DETAIL: Key (name)=(project_admin) already exists.
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('project_admin', '', '2020-03-04T05:34:30.460290+00:00'::timestamptz, '2020-03-04T05:34:30.460312+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(project_admin) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotator) already exists.
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('annotator', '', '2020-03-04T05:34:30.467909+00:00'::timestamptz, '2020-03-04T05:34:30.467926+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(annotator) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotation_approver) already exists.
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('annotation_approver', '', '2020-03-04T05:34:30.468689+00:00'::timestamptz, '2020-03-04T05:34:30.468706+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(annotation_approver) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] ERROR: duplicate key value violates unique constraint "auth_user_username_key"
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] DETAIL: Key (username)=(admin) already exists.
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] STATEMENT: INSERT INTO "auth_user" ("password", "last_login", "is_superuser", "username", "first_name", "last_name", "email", "is_staff", "is_active", "date_joined") VALUES ('<...>', NULL, true, 'admin', '', '', '[email protected]', true, true, '2020-03-04T05:34:32.023520+00:00'::timestamptz) RETURNING "auth_user"."id"
backend_1 | User admin already exists.
backend_1 | CommandError: Error: That username is already taken.
```
I propose checking whether the rows already exist before creating them, to avoid these errors.
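For instance, a minimal sketch of such a check (this mirrors the merged patch above; Django's `get_or_create` would be an equally idiomatic alternative):
```python
for role_name in role_names:
    # Idempotent: only insert when no Role with this name exists yet,
    # so repeated launches no longer hit the unique constraint.
    if Role.objects.filter(name=role_name).exists():
        continue
    Role(name=role_name).save()
```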
| Thank you for reporting the issue. Please provide the procedure to reproduce at first.
@icoxfog417 , just `$ /opt/docker-compose -f docker-compose.prod.yml up` twice. You'll see it the second time. Oh, yes: turn on `debug` first.
@icoxfog417 , about changing the title: not only that, see last lines.
```
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] ERROR: duplicate key value violates unique constraint "auth_user_username_key"
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] DETAIL: Key (username)=(admin) already exists.
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] STATEMENT: INSERT INTO "auth_user" ("password", "last_login", "is_superuser", "username", "first_name", "last_name", "email", "is_staff", "is_active", "date_joined") VALUES ('<...>', NULL, true, 'admin', '', '', '[email protected]', true, true, '2020-03-04T05:34:32.023520+00:00'::timestamptz) RETURNING "auth_user"."id"
backend_1 | User admin already exists.
``` | 2020-03-11T07:15:36 |
|
doccano/doccano | 651 | doccano__doccano-651 | [
"455",
"591"
] | 7026e183d255e1c6f72ae4b233341db7175b59c0 | diff --git a/app/api/urls.py b/app/api/urls.py
--- a/app/api/urls.py
+++ b/app/api/urls.py
@@ -4,7 +4,7 @@
from .views import Me, Features, Users
from .views import ProjectList, ProjectDetail
-from .views import LabelList, LabelDetail, ApproveLabelsAPI
+from .views import LabelList, LabelDetail, ApproveLabelsAPI, LabelUploadAPI
from .views import DocumentList, DocumentDetail
from .views import AnnotationList, AnnotationDetail
from .views import TextUploadAPI, TextDownloadAPI, CloudUploadAPI
@@ -24,6 +24,8 @@
StatisticsAPI.as_view(), name='statistics'),
path('projects/<int:project_id>/labels',
LabelList.as_view(), name='label_list'),
+ path('projects/<int:project_id>/label-upload',
+ LabelUploadAPI.as_view(), name='label_upload'),
path('projects/<int:project_id>/labels/<int:label_id>',
LabelDetail.as_view(), name='label_detail'),
path('projects/<int:project_id>/docs',
diff --git a/app/api/views.py b/app/api/views.py
--- a/app/api/views.py
+++ b/app/api/views.py
@@ -1,5 +1,8 @@
+import json
from django.conf import settings
from django.contrib.auth.models import User
+from django.db import transaction
+from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404, redirect
from django_filters.rest_framework import DjangoFilterBackend
from django.db.models import Count, F, Q
@@ -366,3 +369,24 @@ class RoleMappingDetail(generics.RetrieveUpdateDestroyAPIView):
serializer_class = RoleMappingSerializer
lookup_url_kwarg = 'rolemapping_id'
permission_classes = [IsAuthenticated & IsProjectAdmin]
+
+
+class LabelUploadAPI(APIView):
+ parser_classes = (MultiPartParser,)
+ permission_classes = [IsAuthenticated & IsProjectAdmin]
+
+ @transaction.atomic
+ def post(self, request, *args, **kwargs):
+ if 'file' not in request.data:
+ raise ParseError('Empty content')
+ labels = json.load(request.data['file'])
+ project = get_object_or_404(Project, pk=kwargs['project_id'])
+ try:
+ for label in labels:
+ serializer = LabelSerializer(data=label)
+ serializer.is_valid(raise_exception=True)
+ serializer.save(project=project)
+ return Response(status=status.HTTP_201_CREATED)
+ except IntegrityError:
+ content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
+ return Response(content, status=status.HTTP_400_BAD_REQUEST)
| diff --git a/app/api/tests/data/invalid_labels.json b/app/api/tests/data/invalid_labels.json
new file mode 100644
--- /dev/null
+++ b/app/api/tests/data/invalid_labels.json
@@ -0,0 +1,18 @@
+[
+ {
+ "id": 44,
+ "text": "Dog",
+ "prefix_key": null,
+ "suffix_key": "a",
+ "background_color": "#FF0000",
+ "text_color": "#ffffff"
+ },
+ {
+ "id": 45,
+ "text": "Dog",
+ "prefix_key": null,
+ "suffix_key": "c",
+ "background_color": "#FF0000",
+ "text_color": "#ffffff"
+ }
+]
\ No newline at end of file
diff --git a/app/api/tests/data/valid_labels.json b/app/api/tests/data/valid_labels.json
new file mode 100644
--- /dev/null
+++ b/app/api/tests/data/valid_labels.json
@@ -0,0 +1,18 @@
+[
+ {
+ "id": 44,
+ "text": "Dog",
+ "prefix_key": null,
+ "suffix_key": "a",
+ "background_color": "#FF0000",
+ "text_color": "#ffffff"
+ },
+ {
+ "id": 45,
+ "text": "Cat",
+ "prefix_key": null,
+ "suffix_key": "c",
+ "background_color": "#FF0000",
+ "text_color": "#ffffff"
+ }
+]
\ No newline at end of file
diff --git a/app/api/tests/test_api.py b/app/api/tests/test_api.py
--- a/app/api/tests/test_api.py
+++ b/app/api/tests/test_api.py
@@ -389,6 +389,53 @@ def doCleanups(cls):
remove_all_role_mappings()
+class TestLabelUploadAPI(APITestCase):
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.project_member_name = 'project_member_name'
+ cls.project_member_pass = 'project_member_pass'
+ cls.non_project_member_name = 'non_project_member_name'
+ cls.non_project_member_pass = 'non_project_member_pass'
+ cls.super_user_name = 'super_user_name'
+ cls.super_user_pass = 'super_user_pass'
+ create_default_roles()
+ project_member = User.objects.create_user(username=cls.project_member_name,
+ password=cls.project_member_pass)
+ User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
+ project_admin = User.objects.create_user(username=cls.super_user_name,
+ password=cls.super_user_pass)
+ project = mommy.make('Project', users=[project_member, project_admin])
+ cls.url = reverse(viewname='label_upload', args=[project.id])
+ create_default_roles()
+ assign_user_to_role(project_member=project_admin, project=project, role_name=settings.ROLE_PROJECT_ADMIN)
+ assign_user_to_role(project_member=project_member, project=project, role_name=settings.ROLE_ANNOTATOR)
+
+ def help_to_upload_file(self, filename, expected_status):
+ with open(os.path.join(DATA_DIR, filename), 'rb') as f:
+ response = self.client.post(self.url, data={'file': f})
+ self.assertEqual(response.status_code, expected_status)
+
+ def test_allows_project_admin_to_upload_label(self):
+ self.client.login(username=self.super_user_name,
+ password=self.super_user_pass)
+ self.help_to_upload_file('valid_labels.json', status.HTTP_201_CREATED)
+
+ def test_disallows_project_member_to_upload_label(self):
+ self.client.login(username=self.project_member_name,
+ password=self.project_member_pass)
+ self.help_to_upload_file('valid_labels.json', status.HTTP_403_FORBIDDEN)
+
+ def test_try_to_upload_invalid_file(self):
+ self.client.login(username=self.super_user_name,
+ password=self.super_user_pass)
+ self.help_to_upload_file('invalid_labels.json', status.HTTP_400_BAD_REQUEST)
+
+ @classmethod
+ def doCleanups(cls):
+ remove_all_role_mappings()
+
+
class TestDocumentListAPI(APITestCase, TestUtilsMixin):
@classmethod
| Errors on import labels are not displayed in the UI
Description:
* Go to Labels section.
* Import Labels.
* Upload a file with some error.
* The modal disappears, no label is added but no error is displayed either. I can see server errors in the HTTP response with developer tools (in my case, `{"background_color":["Ensure this field has no more than 7 characters."]}`), but apparently there is no error displayed in the UI.
Environment:
* Operating System: Ubuntu Linux
* How did you install doccano: docker-compose up (from commit 29d187e22621ddcda21db50cb98598d0c44f3ae1)
* Browser: Firefox 70.0.1
Format for uploading labels
How to reproduce the behaviour
---------
What is the required format/structure for uploading the labels? When I click Import Labels from the Actions menu, the file picker dialog suggests a JSON file, but the JSON structure for the labels isn't documented.
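For reference, the test fixtures added by this PR (`valid_labels.json` above) suggest the expected upload format: a JSON array of label objects, e.g.
```json
[
  {
    "text": "Dog",
    "prefix_key": null,
    "suffix_key": "a",
    "background_color": "#FF0000",
    "text_color": "#ffffff"
  }
]
```
(The fixtures also carry an `id` field, which is presumably ignored on import since the serializer assigns its own ids.)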
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: Ubuntu (18.04)
* Python Version Used: 3.7.4
* When you install doccano: 04/March/2020
* How did you install doccano (Heroku button etc): using the docker command
| 2020-03-30T01:01:28 |
|
doccano/doccano | 670 | doccano__doccano-670 | [
"513",
"513"
] | fda1e0c7429b7fa50c8eb4fb7392b7683dfc09b5 | diff --git a/app/api/utils.py b/app/api/utils.py
--- a/app/api/utils.py
+++ b/app/api/utils.py
@@ -222,6 +222,10 @@ class FileParser(object):
def parse(self, file):
raise NotImplementedError()
+ @staticmethod
+ def encode_metadata(data):
+ return json.dumps(data, ensure_ascii=False)
+
class CoNLLParser(FileParser):
"""Uploads CoNLL format file.
@@ -358,7 +362,7 @@ def parse_excel_csv_reader(reader):
elif len(row) == len(columns) and len(row) >= 2:
datum = dict(zip(columns, row))
text, label = datum.pop('text'), datum.pop('label')
- meta = json.dumps(datum)
+ meta = FileParser.encode_metadata(datum)
j = {'text': text, 'labels': [label], 'meta': meta}
data.append(j)
else:
@@ -379,7 +383,7 @@ def parse(self, file):
data = []
try:
j = json.loads(line)
- j['meta'] = json.dumps(j.get('meta', {}))
+ j['meta'] = FileParser.encode_metadata(j.get('meta', {}))
data.append(j)
except json.decoder.JSONDecodeError:
raise FileParseException(line_num=i, line=line)
| [Bug Report] Dataset page: non-ASCII metadata is displayed as Unicode-escaped sequence
Dataset page: non-ASCII metadata is displayed as Unicode-escaped sequence.

(UTF-8 metadata. Displaying at annotation page is correct.)
Doccano 1.0.2
| @c-w where to fix this in your mind? I'll try.
@kuraga This looks to be a bug with the v1 UI with which I haven't yet familiarized myself (for my use-cases, I'm sticking with the v0 UI for now). @Hironsan can perhaps provide pointers for the v1 UI.
Hello @kuraga,
I can display non-ascii characters:

Could you please give me an example of the metadata, so that I can analyze the problem?
@Hironsan ,
dataset (CSV):
```
text,label,metadata_ключ
"a text","a_label","metadata_значение"
```
result:

Thank you @kuraga for the information.
It's really helpful.
# Analysis
The issue is caused by the `json.dumps` call in `FileParser`:
https://github.com/doccano/doccano/blob/1745821b2e484dc35b79959473a68070c8b5e3ae/app/api/utils.py#L361
When we upload a file, `FileParser` parses it and serializes the metadata to a JSON string with `json.dumps`. By default, this escapes Unicode strings because of the `ensure_ascii=True` default:
```python
>>> import json
>>> json.dumps('metadata_значение')
'"metadata_\\u0437\\u043d\\u0430\\u0447\\u0435\\u043d\\u0438\\u0435"'
```
# Quick-fix solution
Use `ensure_ascii=False`:
```python
>>> json.dumps('metadata_значение', ensure_ascii=False)
'"metadata_значение"'
```

| 2020-04-06T02:46:46 |
|
doccano/doccano | 675 | doccano__doccano-675 | [
"674"
] | 6743ac8a44cf0db8e609a0254e97861dbe9ecd22 | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -144,6 +144,8 @@
AUTHENTICATION_BACKENDS = [
'social_core.backends.github.GithubOAuth2',
'social_core.backends.azuread_tenant.AzureADTenantOAuth2',
+ 'social_core.backends.okta.OktaOAuth2',
+ 'social_core.backends.okta_openidconnect.OktaOpenIdConnect',
'django.contrib.auth.backends.ModelBackend',
]
@@ -173,6 +175,22 @@
SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = 'https://graph.microsoft.com/'
SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SCOPE = ['Directory.Read.All']
+SOCIAL_AUTH_OKTA_OAUTH2_KEY = env('OAUTH_OKTA_OAUTH2_KEY', None)
+SOCIAL_AUTH_OKTA_OAUTH2_SECRET = env('OAUTH_OKTA_OAUTH2_SECRET', None)
+SOCIAL_AUTH_OKTA_OAUTH2_API_URL = env('OAUTH_OKTA_OAUTH2_API_URL', None)
+OKTA_OAUTH2_ADMIN_GROUP_NAME = env('OKTA_OAUTH2_ADMIN_GROUP_NAME', None)
+
+if SOCIAL_AUTH_OKTA_OAUTH2_API_URL:
+ SOCIAL_AUTH_OKTA_OAUTH2_SCOPE = ["groups"]
+
+SOCIAL_AUTH_OKTA_OPENIDCONNECT_KEY = env('OAUTH_OKTA_OPENIDCONNECT_KEY', None)
+SOCIAL_AUTH_OKTA_OPENIDCONNECT_SECRET = env('OAUTH_OKTA_OPENIDCONNECT_SECRET', None)
+SOCIAL_AUTH_OKTA_OPENIDCONNECT_API_URL = env('OAUTH_OKTA_OPENIDCONNECT_API_URL', None)
+OKTA_OPENIDCONNECT_ADMIN_GROUP_NAME = env('OKTA_OPENIDCONNECT_ADMIN_GROUP_NAME', None)
+
+if SOCIAL_AUTH_OKTA_OPENIDCONNECT_API_URL:
+ SOCIAL_AUTH_OKTA_OPENIDCONNECT_SCOPE = ["groups"]
+
SOCIAL_AUTH_PIPELINE = [
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
@@ -185,6 +203,8 @@
'social_core.pipeline.user.user_details',
'server.social_auth.fetch_github_permissions',
'server.social_auth.fetch_azuread_permissions',
+ 'server.social_auth.fetch_okta_oauth2_permissions',
+ 'server.social_auth.fetch_okta_openidconnect_permissions',
]
ROLE_PROJECT_ADMIN = env('ROLE_PROJECT_ADMIN', 'project_admin')
diff --git a/app/server/social_auth.py b/app/server/social_auth.py
--- a/app/server/social_auth.py
+++ b/app/server/social_auth.py
@@ -2,6 +2,8 @@
from django.conf import settings
from social_core.backends.azuread_tenant import AzureADTenantOAuth2
from social_core.backends.github import GithubOAuth2
+from social_core.backends.okta import OktaOAuth2
+from social_core.backends.okta_openidconnect import OktaOpenIdConnect
# noinspection PyUnusedLocal
@@ -68,3 +70,74 @@ def fetch_azuread_permissions(strategy, details, user=None, is_new=False, *args,
if user.is_superuser != is_superuser:
user.is_superuser = is_superuser
user.save()
+
+
+# noinspection PyUnusedLocal
+def fetch_okta_oauth2_permissions(strategy, details, user=None, is_new=False, *args, **kwargs):
+ org_url = getattr(settings, 'SOCIAL_AUTH_OKTA_OAUTH2_API_URL', '')
+ admin_group_name = getattr(settings, "OKTA_OAUTH2_ADMIN_GROUP_NAME", "")
+ if not user or not isinstance(kwargs['backend'], OktaOAuth2):
+ return
+
+ # OktaOpenIdConnect inherits `OktaOAuth2`, so we have to explicitly skip OAuth2 trying
+ # to fetch permissions when using OIDC backend.
+ if isinstance(kwargs['backend'], OktaOpenIdConnect):
+ return
+
+ response = requests.post(
+ url=f"{org_url}/v1/userinfo",
+ headers={
+ 'Authorization': 'Bearer {}'.format(kwargs['response']['access_token']),
+ },
+ )
+ response.raise_for_status()
+ response = response.json()
+
+ is_superuser = admin_group_name in response.get("groups", [])
+ is_staff = admin_group_name in response.get("groups", [])
+
+ user_changed = False
+
+ if user.is_superuser != is_superuser:
+ user.is_superuser = is_superuser
+ user_changed = user_changed or True
+
+ if user.is_staff != is_staff:
+ user.is_staff = is_staff
+ user_changed = user_changed or True
+
+ if user_changed:
+ user.save()
+
+
+# noinspection PyUnusedLocal
+def fetch_okta_openidconnect_permissions(strategy, details, user=None, is_new=False, *args, **kwargs):
+ org_url = getattr(settings, 'SOCIAL_AUTH_OKTA_OPENIDCONNECT_API_URL', '')
+ admin_group_name = getattr(settings, "OKTA_OPENIDCONNECT_ADMIN_GROUP_NAME", "")
+ if not user or not isinstance(kwargs['backend'], OktaOpenIdConnect):
+ return
+
+ response = requests.post(
+ url=f"{org_url}/v1/userinfo",
+ headers={
+ 'Authorization': 'Bearer {}'.format(kwargs['response']['access_token']),
+ },
+ )
+ response.raise_for_status()
+ response = response.json()
+
+ is_superuser = admin_group_name in response.get("groups", [])
+ is_staff = admin_group_name in response.get("groups", [])
+
+ user_changed = False
+
+ if user.is_superuser != is_superuser:
+ user.is_superuser = is_superuser
+ user_changed = user_changed or True
+
+ if user.is_staff != is_staff:
+ user.is_staff = is_staff
+ user_changed = user_changed or True
+
+ if user_changed:
+ user.save()
diff --git a/app/server/views.py b/app/server/views.py
--- a/app/server/views.py
+++ b/app/server/views.py
@@ -104,6 +104,8 @@ class LoginView(BaseLoginView):
extra_context = {
'github_login': bool(settings.SOCIAL_AUTH_GITHUB_KEY),
'aad_login': bool(settings.SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID),
+ 'okta_oauth_login': bool(settings.SOCIAL_AUTH_OKTA_OAUTH2_KEY),
+ 'okta_openidconnect_login': bool(settings.SOCIAL_AUTH_OKTA_OPENIDCONNECT_KEY),
'allow_signup': bool(settings.ALLOW_SIGNUP),
}
| diff --git a/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_is_admin.yaml b/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_is_admin.yaml
new file mode 100644
--- /dev/null
+++ b/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_is_admin.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+ body: ""
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '54'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - python-requests/2.21.0
+ method: POST
+ uri: https://dev-000000.okta.com/oauth2/v1/userinfo
+ response:
+ body:
+ string: '{"sub":"agaga42hrey546","groups":["admin-group"]}'
+ headers:
+ Cache-Control:
+ - no-cache, no-store
+ Content-Type:
+ - application/json;charset=UTF-8
+ Date:
+ - Fri, 24 Apr 2020 02:54:39 GMT
+ Strict-Transport-Security:
+ - max-age=315360000
+ Transfer-Encoding:
+ - chunked
+ Vary:
+ - Accept-Encoding
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_not_admin.yaml b/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_not_admin.yaml
new file mode 100644
--- /dev/null
+++ b/app/server/tests/cassettes/TestOktaOAuth2SocialAuth.test_fetch_permissions_not_admin.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+ body: ""
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '54'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - python-requests/2.21.0
+ method: POST
+ uri: https://dev-000000.okta.com/oauth2/v1/userinfo
+ response:
+ body:
+ string: '{"sub":"agaga42hrey546","groups":["user-group"]}'
+ headers:
+ Cache-Control:
+ - no-cache, no-store
+ Content-Type:
+ - application/json;charset=UTF-8
+ Date:
+ - Fri, 24 Apr 2020 02:54:39 GMT
+ Strict-Transport-Security:
+ - max-age=315360000
+ Transfer-Encoding:
+ - chunked
+ Vary:
+ - Accept-Encoding
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_is_admin.yaml b/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_is_admin.yaml
new file mode 100644
--- /dev/null
+++ b/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_is_admin.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+ body: ""
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '54'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - python-requests/2.21.0
+ method: POST
+ uri: https://dev-000000.okta.com/oauth2/v1/userinfo
+ response:
+ body:
+ string: '{"sub":"agaga42hrey546","groups":["admin-group"]}'
+ headers:
+ Cache-Control:
+ - no-cache, no-store
+ Content-Type:
+ - application/json;charset=UTF-8
+ Date:
+ - Fri, 24 Apr 2020 02:54:39 GMT
+ Strict-Transport-Security:
+ - max-age=315360000
+ Transfer-Encoding:
+ - chunked
+ Vary:
+ - Accept-Encoding
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_not_admin.yaml b/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_not_admin.yaml
new file mode 100644
--- /dev/null
+++ b/app/server/tests/cassettes/TestOktaOpenIdConnectSocialAuth.test_fetch_permissions_not_admin.yaml
@@ -0,0 +1,38 @@
+interactions:
+- request:
+ body: ""
+ headers:
+ Accept:
+ - '*/*'
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ Content-Length:
+ - '54'
+ Content-Type:
+ - application/json
+ User-Agent:
+ - python-requests/2.21.0
+ method: POST
+ uri: https://dev-000000.okta.com/oauth2/v1/userinfo
+ response:
+ body:
+ string: '{"sub":"agaga42hrey546","groups":["user-group"]}'
+ headers:
+ Cache-Control:
+ - no-cache, no-store
+ Content-Type:
+ - application/json;charset=UTF-8
+ Date:
+ - Fri, 24 Apr 2020 02:54:39 GMT
+ Strict-Transport-Security:
+ - max-age=315360000
+ Transfer-Encoding:
+ - chunked
+ Vary:
+ - Accept-Encoding
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/app/server/tests/test_social_auth.py b/app/server/tests/test_social_auth.py
--- a/app/server/tests/test_social_auth.py
+++ b/app/server/tests/test_social_auth.py
@@ -2,6 +2,8 @@
from django.test import TestCase, override_settings
from social_core.backends.azuread_tenant import AzureADTenantOAuth2
from social_core.backends.github import GithubOAuth2
+from social_core.backends.okta import OktaOAuth2
+from social_core.backends.okta_openidconnect import OktaOpenIdConnect
from vcr_unittest import VCRMixin
from .. import social_auth
@@ -93,3 +95,75 @@ def test_fetch_permissions_not_admin(self):
)
self.assertFalse(user.is_superuser)
+
+
+@override_settings(SOCIAL_AUTH_OKTA_OAUTH2_KEY='0000000000aaaaaaaaaa') # nosec
+@override_settings(SOCIAL_AUTH_OKTA_OAUTH2_SECRET='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=') # nosec
+@override_settings(SOCIAL_AUTH_OKTA_OAUTH2_API_URL='https://dev-000000.okta.com/oauth2') # nosec
+@override_settings(OKTA_OAUTH2_ADMIN_GROUP_NAME='admin-group')
+class TestOktaOAuth2SocialAuth(VCRTestCase):
+ strategy = None
+ backend = OktaOAuth2(strategy=strategy)
+ access_token = 'censored'
+
+ def test_fetch_permissions_is_admin(self):
+ user = User()
+
+ social_auth.fetch_okta_oauth2_permissions(
+ strategy=self.strategy,
+ details={},
+ user=user,
+ backend=self.backend,
+ response={'access_token': self.access_token},
+ )
+
+ self.assertTrue(user.is_superuser)
+
+ def test_fetch_permissions_not_admin(self):
+ user = User()
+
+ social_auth.fetch_okta_oauth2_permissions(
+ strategy=self.strategy,
+ details={},
+ user=user,
+ backend=self.backend,
+ response={'access_token': self.access_token},
+ )
+
+ self.assertFalse(user.is_superuser)
+
+
+@override_settings(SOCIAL_AUTH_OKTA_OPENIDCONNECT_KEY='0000000000aaaaaaaaaa') # nosec
+@override_settings(SOCIAL_AUTH_OKTA_OPENIDCONNECT_SECRET='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=') # nosec
+@override_settings(SOCIAL_AUTH_OKTA_OPENIDCONNECT_API_URL='https://dev-000000.okta.com/oauth2') # nosec
+@override_settings(OKTA_OPENIDCONNECT_ADMIN_GROUP_NAME='admin-group')
+class TestOktaOpenIdConnectSocialAuth(VCRTestCase):
+ strategy = None
+ backend = OktaOpenIdConnect(strategy=strategy)
+ access_token = 'censored'
+
+ def test_fetch_permissions_is_admin(self):
+ user = User()
+
+ social_auth.fetch_okta_openidconnect_permissions(
+ strategy=self.strategy,
+ details={},
+ user=user,
+ backend=self.backend,
+ response={'access_token': self.access_token},
+ )
+
+ self.assertTrue(user.is_superuser)
+
+ def test_fetch_permissions_not_admin(self):
+ user = User()
+
+ social_auth.fetch_okta_openidconnect_permissions(
+ strategy=self.strategy,
+ details={},
+ user=user,
+ backend=self.backend,
+ response={'access_token': self.access_token},
+ )
+
+ self.assertFalse(user.is_superuser)
| Add Okta Integration for OAuth
Feature description
---------
<!-- Please describe the feature: Which area of the library is it related to? What specific solution would you like? -->
Add Okta OpenID and OAuth support as other social login options.
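Judging from the settings and test fixtures in this patch, the OAuth2 variant would be configured through environment variables along these lines (the values below are illustrative placeholders taken from the test cassettes, not real credentials):
```
OAUTH_OKTA_OAUTH2_KEY=0000000000aaaaaaaaaa
OAUTH_OKTA_OAUTH2_SECRET=bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb=
OAUTH_OKTA_OAUTH2_API_URL=https://dev-000000.okta.com/oauth2
OKTA_OAUTH2_ADMIN_GROUP_NAME=admin-group
```
The OpenID Connect backend has a parallel set of `OAUTH_OKTA_OPENIDCONNECT_*` variables.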
| 2020-04-08T01:37:50 |
|
doccano/doccano | 693 | doccano__doccano-693 | [
"692"
] | ec889bf43a4c83be455876d57b5ddeedd0bd6a07 | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -314,4 +314,5 @@
CORS_ORIGIN_WHITELIST = (
'http://127.0.0.1:3000',
'http://0.0.0.0:3000',
+ 'http://localhost:3000'
)
| CORS error when running locally in development mode
How to reproduce the behaviour
---------
1. `git clone https://github.com/doccano/doccano.git`
2. `cd doccano`
3. `docker-compose -f docker-compose.dev.yml up`
4. Visit `http://localhost:3000/auth`
5. Login with user `admin` and password `password`
Your Environment
---------
* Operating System: macOS Catalina 10.15.3
* Browser: Chrome 80.0.3987.163 (Official Build) (64-bit)
What Happens
---------
I get a CORS error and I can't login:
```
Access to XMLHttpRequest at 'http://127.0.0.1:8000/v1/auth-token' from origin 'http://localhost:3000' has been blocked by CORS policy: Response to preflight request doesn't pass access control check: No 'Access-Control-Allow-Origin' header is present on the requested resource.
```

Here is what the Request Headers look like:

| 2020-04-12T22:15:36 |
||
doccano/doccano | 841 | doccano__doccano-841 | [
"663"
] | 676a37137b02ae0ad5f49cac54478dbb68191a32 | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -314,6 +314,7 @@
EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)
EMAIL_PORT = env.int('EMAIL_PORT', 587)
+DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL', 'webmaster@localhost')
if not EMAIL_HOST:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| Signup verification email not received
How to reproduce the behaviour
---------
I set up the project using the AWS one-click deployment button. Everything works fine, but when a new user signs up, the verification email is not received. I believe I have to set up the email host configuration in `settings.py`. How do I set it up, given that the project has already been deployed? Is it in the `/env.list` file, or does the AWS one-click deployment handle this automatically?
```
# necessary for email verification of new accounts
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS', False)
EMAIL_HOST = env('EMAIL_HOST', None)
EMAIL_HOST_USER = env('EMAIL_HOST_USER', None)
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', None)
EMAIL_PORT = env.int('EMAIL_PORT', 587)
```
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: AWS ubuntu
* Python Version Used: 3.6
* When you install doccano: Mar 30, 2020
* How did you install doccano (Heroku button etc): AWS one-click deployment
---------
Also, when I deployed the project with docker-compose from the GitHub repository, the project looks older than the demo at http://doccano.herokuapp.com/. Why is that? Am I missing something here?
| I am having the same issue when deploying doccano using the one-click button for Heroku.
Any advice would be greatly appreciated.
@srbek-cmd I have figured out how to make email verification work. You have to add the email host configuration to the `/env.list` file and rerun doccano.
> Note: It will erase all the previous doccano containers and create a new one, so all your old data from doccano will be erased.
E.g.:
/env.list
```
ADMIN=admin
[email protected]
PASSWORD=password
DEBUG=False
SECRET_KEY=your_secret_key
EMAIL_USE_TLS=True
EMAIL_HOST=smtp.gmail.com
[email protected]
EMAIL_HOST_PASSWORD=email_host_password
EMAIL_PORT=587
```
Then remove the docker containers and rebuild them using the new /env.list file.
```
sudo docker stop doccano
sudo docker rm doccano
sudo docker run -d --name doccano --env-file /env.list -p 80:8000 chakkiworks/doccano:latest
sudo docker exec doccano tools/create-admin.sh ${ADMIN} ${EMAIL} ${PASSWORD}
```
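With the `DEFAULT_FROM_EMAIL` setting added in this patch, the sender address can be configured the same way; assuming the same `/env.list` convention (the address below is a placeholder):
```
# hypothetical sender address; use your own domain
[email protected]
```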
| 2020-06-13T21:35:43 |
|
doccano/doccano | 863 | doccano__doccano-863 | [
"859"
] | dbbf6d65b064a09381c70915b1ca9533c472a063 | diff --git a/app/api/admin.py b/app/api/admin.py
--- a/app/api/admin.py
+++ b/app/api/admin.py
@@ -9,13 +9,13 @@
class LabelAdmin(admin.ModelAdmin):
list_display = ('text', 'project', 'text_color', 'background_color')
ordering = ('project',)
- search_fields = ('project',)
+ search_fields = ('text',)
class DocumentAdmin(admin.ModelAdmin):
list_display = ('text', 'project', 'meta')
ordering = ('project',)
- search_fields = ('project',)
+ search_fields = ('text',)
class ProjectAdmin(admin.ModelAdmin):
@@ -27,19 +27,19 @@ class ProjectAdmin(admin.ModelAdmin):
class SequenceAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'label', 'start_offset', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class DocumentAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'label', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class Seq2seqAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'text', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class RoleAdmin(admin.ModelAdmin):
@@ -51,7 +51,7 @@ class RoleAdmin(admin.ModelAdmin):
class RoleMappingAdmin(admin.ModelAdmin):
list_display = ('user', 'role', 'project', )
ordering = ('user',)
- search_fields = ('user',)
+ search_fields = ('user__username',)
admin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)
| [Bug report] Error on Django Admin search
How to reproduce the behaviour
---------

Press "Search".
```
backend_1 | Internal Server Error: /admin/api/document/
backend_1 | Traceback (most recent call last):
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
backend_1 | response = get_response(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
backend_1 | response = self.process_exception_by_middleware(e, request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
backend_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 606, in wrapper
backend_1 | return self.admin_site.admin_view(view)(*args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 142, in _wrapped_view
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/views/decorators/cache.py", line 44, in _wrapped_view_func
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/sites.py", line 223, in inner
backend_1 | return view(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 45, in _wrapper
backend_1 | return bound_method(*args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 142, in _wrapped_view
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 1685, in changelist_view
backend_1 | cl = self.get_changelist_instance(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 744, in get_changelist_instance
backend_1 | sortable_by,
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py", line 81, in __init__
backend_1 | self.queryset = self.get_queryset(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py", line 439, in get_queryset
backend_1 | qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 1023, in get_search_results
backend_1 | queryset = queryset.filter(reduce(operator.or_, or_queries))
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/query.py", line 892, in filter
backend_1 | return self._filter_or_exclude(False, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/query.py", line 910, in _filter_or_exclude
backend_1 | clone.query.add_q(Q(*args, **kwargs))
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1290, in add_q
backend_1 | clause, _ = self._add_q(q_object, self.used_aliases)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1312, in _add_q
backend_1 | current_negated, allow_joins, split_subq, simple_col)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1318, in _add_q
backend_1 | split_subq=split_subq, simple_col=simple_col,
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1251, in build_filter
backend_1 | condition = self.build_lookup(lookups, col, value)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1107, in build_lookup
backend_1 | raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
backend_1 | django.core.exceptions.FieldError: Related Field got invalid lookup: icontains
backend_1 | [29/Jun/2020 12:25:49] "GET /admin/api/document/?q=request HTTP/1.1" 500 160618
```
Is this like https://stackoverflow.com/questions/11754877/troubleshooting-related-field-has-invalid-lookup-icontains?
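Yes, it is the same problem: the admin applies an `icontains` lookup to every entry in `search_fields`, which a bare relation does not support, so the search has to traverse to a text column with the `__` syntax. That is exactly what the patch above does; a minimal sketch:
```python
from django.contrib import admin

class DocumentAdmin(admin.ModelAdmin):
    # search a text column directly...
    search_fields = ('text',)

class SequenceAnnotationAdmin(admin.ModelAdmin):
    # ...or follow the FK to one, instead of naming the relation itself
    search_fields = ('document__text',)
```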
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Calculate Linux 20.6
* Python Version Used: system Python version is 3.7.7
* When you install doccano: 4927a01f090b91d8e14e467f2fd40d8301612e72
* How did you install doccano (Heroku button etc): Docker Compose
| Thank you. The link is very helpful. | 2020-07-01T09:34:19 |
|
doccano/doccano | 916 | doccano__doccano-916 | [
"531"
] | 6743ac8a44cf0db8e609a0254e97861dbe9ecd22 | diff --git a/app/api/views.py b/app/api/views.py
--- a/app/api/views.py
+++ b/app/api/views.py
@@ -235,9 +235,16 @@ def check_single_class_classification(project_id, doc_id, user):
class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_url_kwarg = 'annotation_id'
- permission_classes = [IsAuthenticated & (((IsAnnotator & IsOwnAnnotation) | IsAnnotationApprover) | IsProjectAdmin)]
swagger_schema = None
+ def get_permissions(self):
+ project = get_object_or_404(Project, pk=self.kwargs['project_id'])
+ if project.collaborative_annotation:
+ self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin]
+ else:
+ self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin & IsOwnAnnotation]
+ return super().get_permissions()
+
def get_serializer_class(self):
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
self.serializer_class = project.get_annotation_serializer()
| diff --git a/app/api/tests/test_api.py b/app/api/tests/test_api.py
--- a/app/api/tests/test_api.py
+++ b/app/api/tests/test_api.py
@@ -880,9 +880,11 @@ def setUpTestData(cls):
another_entity = mommy.make('SequenceAnnotation',
document=main_project_doc, user=another_project_member)
- sub_project = mommy.make('SequenceLabelingProject', users=[non_project_member])
- sub_project_doc = mommy.make('Document', project=sub_project)
- mommy.make('SequenceAnnotation', document=sub_project_doc)
+ shared_project = mommy.make('SequenceLabelingProject',
+ collaborative_annotation=True,
+ users=[project_member, another_project_member])
+ shared_project_doc = mommy.make('Document', project=shared_project)
+ shared_entity = mommy.make('SequenceAnnotation', document=shared_project_doc, user=another_project_member)
cls.url = reverse(viewname='annotation_detail', args=[main_project.id,
main_project_doc.id,
@@ -890,9 +892,12 @@ def setUpTestData(cls):
cls.another_url = reverse(viewname='annotation_detail', args=[main_project.id,
main_project_doc.id,
another_entity.id])
+ cls.shared_url = reverse(viewname='annotation_detail', args=[shared_project.id,
+ shared_project_doc.id,
+ shared_entity.id])
cls.post_data = {'start_offset': 0, 'end_offset': 10}
- assign_user_to_role(project_member=project_member, project=main_project,
- role_name=settings.ROLE_ANNOTATOR)
+ assign_user_to_role(project_member=project_member, project=main_project, role_name=settings.ROLE_ANNOTATOR)
+ assign_user_to_role(project_member=project_member, project=shared_project, role_name=settings.ROLE_ANNOTATOR)
def test_returns_annotation_to_project_member(self):
self.client.login(username=self.project_member_name,
@@ -954,6 +959,18 @@ def test_disallows_project_member_to_delete_annotation_of_another_member(self):
response = self.client.delete(self.another_url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
+ def test_allow_member_to_update_others_annotation_in_shared_project(self):
+ self.client.login(username=self.project_member_name,
+ password=self.project_member_pass)
+ response = self.client.patch(self.shared_url, format='json', data=self.post_data)
+ self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+ def test_allow_member_to_delete_others_annotation_in_shared_project(self):
+ self.client.login(username=self.project_member_name,
+ password=self.project_member_pass)
+ response = self.client.delete(self.shared_url, format='json')
+ self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
+
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
| Allow user to change another users' annotation
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/chakki-works/doccano/wiki/Frequently-Asked-Questions -->
I have added the permission to allow users to change annotations.
But I cannot find where to allow users to change other people's annotations.
This is especially important because I already have some annotations, and I need multiple people to correct them.
<!--
Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: linux
* Python Version Used: python 3.6
* When you install doccano: Oct 2019
* How did you install doccano (Heroku button etc):
| Did you try "Share annotations across all users"?

@icoxfog417 Yes, I've set this option when creating the project.
#519 ?
@Slyne Thank you for reporting, please help me to clarify your problem.
* You checked "Share annotations across all users" then ...
* You can't "watch" any annotations by other users?
* You can't "edit" any annotations by other users?
* You can't "control" the role of other annotators for editing/watching?
Yes, I checked "share annotations across all users" and also set collaborative annotation on the admin page.
I can watch annotations by other users.
I can't edit any annotations by other users.
I'm not sure where to control the roles. I gave all the permissions to all users on the admin page, but it still doesn't work.
@Slyne Thank you for the clarification. Let me lay out the relation between the feature to share annotations and the authority to read/write them:
| | read | write |
|---------|------------------------------------------|-------------|
| private | (default) | (default) |
| share | "Share annotations across all users" now | **missing** |
I think this should be fixed with my implementation of #703 if it gets approved. After that annotation approvers can edit/remove other annotations that were made by annotators.
I guess an annotator should be able to edit other annotators' tags only if the document has not been approved yet.
To make this clear:
- If `share annotation` is not checked:
- all the roles can't view other's annotations.
- If `share annotation` is checked:
- all the roles can view other's annotations.
- `project_admin` and `annotation_approver` can edit/delete other's annotations.
- `annotator` cannot edit/delete other's annotations.
Ideal situation:
- If `share annotation` is not checked, all the roles can view/edit/delete only their own annotations.
- If `share annotation` is checked, all the roles can view/edit/delete other's annotations.
- (I don't think `annotation approver` is allowed to annotate. We need to design roles and their permissions more clearly.)
Solution:
- Make `permission_classes` in `AnnotationDetail` dependent on `share annotation` option.
- If `share annotation` is not checked, `permission_classes` has `[IsAuthenticated & IsProjectAdmin & IsOwnAnnotation]`.
- If `share annotation` is checked, `permission_classes` has `[IsAuthenticated & IsInProjectOrAdmin]`.
The way:
- overwrite `get_permissions` method.
```
class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_url_kwarg = 'annotation_id'
# permission_classes = [IsAuthenticated & IsInProjectOrAdmin]
swagger_schema = None
def get_permissions(self):
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
if project.collaborative_annotation:
self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin]
else:
self.permission_classes = [IsAuthenticated & IsInProjectOrAdmin & IsOwnAnnotation]
return super().get_permissions()
``` | 2020-07-21T12:20:26 |
doccano/doccano | 918 | doccano__doccano-918 | [
"481"
] | 35cc8b303b52646f9055f5fd07a54db0ec79f658 | diff --git a/app/api/serializers.py b/app/api/serializers.py
--- a/app/api/serializers.py
+++ b/app/api/serializers.py
@@ -84,6 +84,13 @@ class Meta:
fields = ('id', 'text', 'annotations', 'meta', 'annotation_approver')
+class ApproverSerializer(DocumentSerializer):
+
+ class Meta:
+ model = Document
+ fields = ('id', 'annotation_approver')
+
+
class ProjectSerializer(serializers.ModelSerializer):
current_users_role = serializers.SerializerMethodField()
diff --git a/app/api/views.py b/app/api/views.py
--- a/app/api/views.py
+++ b/app/api/views.py
@@ -20,7 +20,7 @@
from .filters import DocumentFilter
from .models import Project, Label, Document, RoleMapping, Role
from .permissions import IsProjectAdmin, IsAnnotatorAndReadOnly, IsAnnotator, IsAnnotationApproverAndReadOnly, IsOwnAnnotation, IsAnnotationApprover
-from .serializers import ProjectSerializer, LabelSerializer, DocumentSerializer, UserSerializer
+from .serializers import ProjectSerializer, LabelSerializer, DocumentSerializer, UserSerializer, ApproverSerializer
from .serializers import ProjectPolymorphicSerializer, RoleMappingSerializer, RoleSerializer
from .utils import CSVParser, ExcelParser, JSONParser, PlainTextParser, CoNLLParser, AudioParser, iterable_to_io
from .utils import JSONLRenderer
@@ -133,7 +133,7 @@ def post(self, request, *args, **kwargs):
document = get_object_or_404(Document, pk=self.kwargs['doc_id'])
document.annotations_approved_by = self.request.user if approved else None
document.save()
- return Response(DocumentSerializer(document).data)
+ return Response(ApproverSerializer(document).data)
class LabelList(generics.ListCreateAPIView):
| Can't change from "Checked" to "Not Checked"?
I annotated the following sentence in an NER project; it had already been annotated by others.
Here is the sentence:

I clicked the top-left button to see the others' annotations:

However, when I no longer want to see the others' annotations, clicking the top-left button again doesn't revert it:

| 2020-07-22T01:45:09 |
||
doccano/doccano | 964 | doccano__doccano-964 | [
"963"
] | 4e25f169462a2d0413679beeb58c95b532616d14 | diff --git a/app/api/serializers.py b/app/api/serializers.py
--- a/app/api/serializers.py
+++ b/app/api/serializers.py
@@ -177,7 +177,7 @@ class DocumentAnnotationSerializer(serializers.ModelSerializer):
class Meta:
model = DocumentAnnotation
- fields = ('id', 'prob', 'label', 'user', 'document')
+ fields = ('id', 'prob', 'label', 'user', 'document', 'created_at', 'updated_at')
read_only_fields = ('user', )
@@ -188,7 +188,7 @@ class SequenceAnnotationSerializer(serializers.ModelSerializer):
class Meta:
model = SequenceAnnotation
- fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document')
+ fields = ('id', 'prob', 'label', 'start_offset', 'end_offset', 'user', 'document', 'created_at', 'updated_at')
read_only_fields = ('user',)
@@ -197,7 +197,7 @@ class Seq2seqAnnotationSerializer(serializers.ModelSerializer):
class Meta:
model = Seq2seqAnnotation
- fields = ('id', 'text', 'user', 'document', 'prob')
+ fields = ('id', 'text', 'user', 'document', 'prob', 'created_at', 'updated_at')
read_only_fields = ('user',)
@@ -206,7 +206,7 @@ class Speech2textAnnotationSerializer(serializers.ModelSerializer):
class Meta:
model = Speech2textAnnotation
- fields = ('id', 'prob', 'text', 'user', 'document')
+ fields = ('id', 'prob', 'text', 'user', 'document', 'created_at', 'updated_at')
read_only_fields = ('user',)
| Expose timestamps associated with annotations with the API
The `..Annotation` models currently store the `created_at` and `updated_at` fields, but they are not exposed by the API.
I'd like to propose exposing them through the API so that downstream analysis can be conducted using simple API calls, like those made using [`doccano-client`](https://github.com/doccano/doccano-client), for instance.
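With the serializer changes above, every annotation payload then carries both timestamps. An illustrative (not verbatim) sequence-labeling response might look like:
```json
{
  "id": 12,
  "prob": 0.0,
  "label": 3,
  "start_offset": 0,
  "end_offset": 10,
  "user": 1,
  "document": 7,
  "created_at": "2020-08-27T06:13:01.000000Z",
  "updated_at": "2020-08-27T06:13:01.000000Z"
}
```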
| 2020-08-27T06:13:01 |
||
doccano/doccano | 1,072 | doccano__doccano-1072 | [
"1026"
] | 1adf8644b833ad16ff486341a497c5de4a9d1874 | diff --git a/app/api/views.py b/app/api/views.py
--- a/app/api/views.py
+++ b/app/api/views.py
@@ -1,5 +1,7 @@
import collections
import json
+import random
+
from django.conf import settings
from django.contrib.auth.models import User
from django.db import transaction
@@ -106,7 +108,6 @@ def _get_user_completion_data(annotation_class, annotation_filter):
set_user_data[ind_obj['user__username']].add(ind_obj['document__id'])
return {i: len(set_user_data[i]) for i in set_user_data}
-
def progress(self, project):
docs = project.documents
annotation_class = project.get_annotation_class()
@@ -171,7 +172,9 @@ def get_queryset(self):
queryset = project.documents
if project.randomize_document_order:
- queryset = queryset.annotate(sort_id=F('id') % self.request.user.id).order_by('sort_id')
+ random.seed(self.request.user.id)
+ value = random.randrange(2, 20)
+ queryset = queryset.annotate(sort_id=F('id') % value).order_by('sort_id', 'id')
else:
queryset = queryset.order_by('id')
| Duplication of the data occurs at the bottom of paging
How to reproduce the behaviour
---------
The data at bottom of every page is same, that also make reduplicate document when doing annotation.



Your Environment
---------
* Operating System: ubuntu 16
* Python Version Used: python3.6
* When you install doccano: 2020-10-26
* How did you install doccano (Heroku button etc): github
|
@buriy @ramok @vinayaugustine @clarus
Same issue here. Besides, this problem only occurs in sequence labeling projects with randomized document order. I guess this bug is caused by a flawed randomization algorithm. Hope that helps.
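That guess is consistent with the patch above: the old code ordered only by `F('id') % user.id`, which leaves large groups of tied rows, and the database is free to return ties in a different order on every paginated query, so the same document can surface on several pages. The fix derives a per-user modulus from a seeded RNG and, crucially, adds `id` as a deterministic tiebreaker. The heart of the patched `get_queryset` (a sketch; `request` and `queryset` come from the surrounding view):
```python
import random
from django.db.models import F

random.seed(request.user.id)       # stable, per-user shuffle seed
value = random.randrange(2, 20)    # per-user modulus for the pseudo-shuffle
queryset = queryset.annotate(sort_id=F('id') % value).order_by('sort_id', 'id')
```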
related #933 | 2020-11-26T09:29:47 |
|
doccano/doccano | 1,088 | doccano__doccano-1088 | [
"786"
] | 6e76b9586b9c95f27e9432a456f4213d2febef53 | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -92,7 +92,7 @@
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'DIRS': [path.join(BASE_DIR, 'server/templates'), path.join(BASE_DIR, 'authentification/templates')],
+ 'DIRS': [path.join(BASE_DIR, 'client/dist')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
@@ -118,12 +118,7 @@
STATIC_ROOT = path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
- static_path
- for static_path in (
- path.join(BASE_DIR, 'server', 'static', 'assets'),
- path.join(BASE_DIR, 'server', 'static', 'static'),
- )
- if path.isdir(static_path)
+ path.join(BASE_DIR, 'client/dist/static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
diff --git a/app/app/urls.py b/app/app/urls.py
--- a/app/app/urls.py
+++ b/app/app/urls.py
@@ -15,12 +15,11 @@
"""
from django.conf import settings
from django.contrib import admin
-from django.urls import path, include
-from django.contrib.auth.views import PasswordResetView, LogoutView
+from django.urls import path, include, re_path
+from django.contrib.auth.views import TemplateView
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
-from server.views import LoginView
# TODO: adds AnnotationList and AnnotationDetail endpoint.
schema_view = get_schema_view(
@@ -34,16 +33,12 @@
)
urlpatterns = [
- path('', include('authentification.urls')),
- path('', include('server.urls')),
path('admin/', admin.site.urls),
path('social/', include('social_django.urls')),
- path('login/', LoginView.as_view(), name='login'),
- path('logout/', LogoutView.as_view(), name='logout'),
- path('password_reset/', PasswordResetView.as_view(), name='password_reset'),
path('api-auth/', include('rest_framework.urls')),
path('v1/', include('api.urls')),
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
+ re_path('', TemplateView.as_view(template_name='index.html')),
]
if 'cloud_browser' in settings.INSTALLED_APPS:
| [Question] Docker deployment is obsolete in doccano 1.x.x?
Docker part [has been removed]( https://github.com/doccano/doccano/commit/1be0986c7a9faa5cf24d5d91900bb0d1058ba49f#diff-04c6e90faac2675aa89e2176d2eec7d8R64) from [ReadMe doocumentation](https://github.com/doccano/doccano/blob/master/README.md) some time ago.
In contrast, information about Docker deployment still stays in the [Getting Started documentation](https://github.com/doccano/doccano/blob/master/docs/getting-started.md#docker). (BTW, these documents are very different now, see #785).
Is Docker deployment is obsolete in doccano 1.x.x?
Also, we have https://hub.docker.com/u/doccano docker hub repository without mention in documentation.
| 2020-12-01T12:57:33 |
||
doccano/doccano | 1,100 | doccano__doccano-1100 | [
"1095"
] | 6be605289005ec715f9c9e54d91ca268f8641aa2 | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -10,9 +10,10 @@
Any setting that is configured via an environment variable may
also be set in a `.env` file in the project base directory.
"""
+import importlib.util
+import sys
from os import path
-import django_heroku
import dj_database_url
from environs import Env
from furl import furl
@@ -53,13 +54,13 @@
'django.contrib.staticfiles',
'server.apps.ServerConfig',
'api.apps.ApiConfig',
- 'widget_tweaks',
+ # 'widget_tweaks',
'rest_framework',
'rest_framework.authtoken',
'django_filters',
'social_django',
'polymorphic',
- 'webpack_loader',
+ # 'webpack_loader',
'corsheaders',
'drf_yasg'
]
@@ -83,7 +84,7 @@
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
- 'applicationinsights.django.ApplicationInsightsMiddleware',
+ # 'applicationinsights.django.ApplicationInsightsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
@@ -277,7 +278,13 @@
LOGIN_REDIRECT_URL = '/projects/'
LOGOUT_REDIRECT_URL = '/'
-django_heroku.settings(locals(), test_runner=False)
+# dynamic import to avoid installing psycopg2 on pip installation.
+name = 'django_heroku'
+if (spec := importlib.util.find_spec(name)) is not None:
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[name] = module
+ spec.loader.exec_module(module)
+ module.settings(locals(), test_runner=False)
# Change 'default' database configuration with $DATABASE_URL.
DATABASES['default'].update(dj_database_url.config(
@@ -309,7 +316,7 @@
CSRF_TRUSTED_ORIGINS = env.list('CSRF_TRUSTED_ORIGINS', [])
# Allow all host headers
-# ALLOWED_HOSTS = ['*']
+ALLOWED_HOSTS = ['*']
# Size of the batch for creating documents
# on the import phase
diff --git a/app/doccano/doccano.py b/app/doccano/doccano.py
--- a/app/doccano/doccano.py
+++ b/app/doccano/doccano.py
@@ -4,13 +4,13 @@
def main():
- parser = argparse.ArgumentParser(description='doccano.')
+ parser = argparse.ArgumentParser(description='doccano, text annotation for machine learning practitioners.')
parser.add_argument('--username', type=str, default='admin', help='admin username')
parser.add_argument('--password', type=str, default='password', help='admin password')
parser.add_argument('--email', type=str, default='[email protected]', help='admin email')
- parser.add_argument('--port', type=int, default=8000, help='port')
- parser.add_argument('--workers', type=int, default=1, help='workers')
- parser.add_argument('--database_url', type=str, default='sqlite:///doccano.db', help='data store')
+ parser.add_argument('--port', type=int, default=8000, help='port number')
+ # parser.add_argument('--workers', type=int, default=1, help='the number of workers')
+ parser.add_argument('--database_url', type=str, default='sqlite:///doccano.db', help='the database URL')
args = parser.parse_args()
os.environ.setdefault('DEBUG', 'False')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@
from setuptools import find_packages, setup
NAME = 'doccano'
-DESCRIPTION = 'doccano'
+DESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'
URL = 'https://github.com/doccano/doccano'
EMAIL = '[email protected]'
AUTHOR = 'Hironsan'
@@ -16,8 +16,9 @@
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
-# required = ['requests', 'boto3', 'pydantic', 'jinja2']
-required = [line.rstrip() for line in io.open(os.path.join(here, 'app/requirements.txt')) if not line.startswith('psy')]
+# Todo: make a cleaned requirements.txt
+required = [line.rstrip() for line in io.open(os.path.join(here, 'app/requirements.txt'))
+ if not line.startswith(('psycopg2', 'django-heroku', 'django-mssql-backend'))]
setup(
name=NAME,
@@ -36,6 +37,10 @@
]
},
install_requires=required,
+ extras_require={
+ 'postgresql': ['psycopg2-binary>=2.8.6'],
+ 'mssql': ['django-mssql-backend>=2.8.1'],
+ },
include_package_data=True,
license=LICENSE,
classifiers=[
| Error: pg_config executable not found on pip installation
How to reproduce the behaviour
---------
If we install doccano via pip without PostgreSQL, the installation fails:
```bash
ERROR: Command errored out with exit status 1:
command: /usr/local/bin/python -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-ah3c9rb8/psycopg2_417f887249d841b688fd71f98d0c1c9d/setup.py'"'"'; __file__='"'"'/tmp/pip-install-ah3c9rb8/psycopg2_417f887249d841b688fd71f98d0c1c9d/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' egg_info --egg-base /tmp/pip-pip-egg-info-rx5i396x
cwd: /tmp/pip-install-ah3c9rb8/psycopg2_417f887249d841b688fd71f98d0c1c9d/
Complete output (23 lines):
running egg_info
creating /tmp/pip-pip-egg-info-rx5i396x/psycopg2.egg-info
writing /tmp/pip-pip-egg-info-rx5i396x/psycopg2.egg-info/PKG-INFO
writing dependency_links to /tmp/pip-pip-egg-info-rx5i396x/psycopg2.egg-info/dependency_links.txt
writing top-level names to /tmp/pip-pip-egg-info-rx5i396x/psycopg2.egg-info/top_level.txt
writing manifest file '/tmp/pip-pip-egg-info-rx5i396x/psycopg2.egg-info/SOURCES.txt'
Error: pg_config executable not found.
pg_config is required to build psycopg2 from source. Please add the directory
containing pg_config to the $PATH or specify the full executable path with the
option:
python setup.py build_ext --pg-config /path/to/pg_config build ...
or with the pg_config option in 'setup.cfg'.
If you prefer to avoid building psycopg2 from source, please install the PyPI
'psycopg2-binary' package instead.
For further information please check the 'doc/src/install.rst' file (also at
<https://www.psycopg.org/docs/install.html>).
----------------------------------------
ERROR: Command errored out with exit status 1: python setup.py egg_info Check the logs for full command output.
```
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Debian Buster
* Python Version Used: 3.8.6
* When you install doccano: Now
* How did you install doccano (Heroku button etc): pip
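For reference, the merged patch (diff above) resolves this by filtering the heavy database drivers out of `install_requires` and exposing them as optional extras. A condensed, self-contained sketch of that `setup()` call; the base requirements list here is abbreviated, not the real one:

```python
from setuptools import find_packages, setup

# Abbreviated: the real list is read from app/requirements.txt, minus the
# psycopg2 / django-heroku / django-mssql-backend lines.
required = ["dj-database-url>=0.5.0", "environs>=9.2.0"]

setup(
    name="doccano",
    packages=find_packages(),
    install_requires=required,
    extras_require={
        "postgresql": ["psycopg2-binary>=2.8.6"],  # pip install doccano[postgresql]
        "mssql": ["django-mssql-backend>=2.8.1"],  # pip install doccano[mssql]
    },
)
```

With that split, a plain `pip install doccano` no longer tries to build psycopg2 from source.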
| This is due to `django-heroku`:
```bash
django-heroku==0.3.1
- dj-database-url [required: >=0.5.0, installed: 0.5.0]
- django [required: Any, installed: 3.1.4]
- asgiref [required: >=3.2.10,<4, installed: 3.3.1]
- pytz [required: Any, installed: 2020.4]
- sqlparse [required: >=0.2.2, installed: 0.4.1]
- psycopg2 [required: Any, installed: 2.8.6]
- whitenoise [required: Any, installed: 5.2.0]
``` | 2020-12-08T05:43:55 |
|
doccano/doccano | 1,108 | doccano__doccano-1108 | [
"1107"
] | 69f8bce9d4fb7f147e0098a8984b5264b656ef8e | diff --git a/app/app/settings.py b/app/app/settings.py
--- a/app/app/settings.py
+++ b/app/app/settings.py
@@ -280,7 +280,8 @@
# dynamic import to avoid installing psycopg2 on pip installation.
name = 'django_heroku'
-if (spec := importlib.util.find_spec(name)) is not None:
+spec = importlib.util.find_spec(name)
+if spec is not None:
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
spec.loader.exec_module(module)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,9 +16,26 @@
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
-# Todo: make a cleaned requirements.txt
-required = [line.rstrip() for line in io.open(os.path.join(here, 'app/requirements.txt'))
- if not line.startswith(('psycopg2', 'django-heroku', 'django-mssql-backend'))]
+required = [
+ 'apache-libcloud>=3.2.0',
+ 'colour>=0.1.5',
+ 'conllu>=4.2.2',
+ 'dj-database-url>=0.5.0',
+ 'django-cors-headers>=3.5.0',
+ 'django-filter>=2.4.0',
+ 'django-rest-polymorphic>=0.1.9',
+ 'djangorestframework-csv>=2.1.0',
+ 'djangorestframework-xml>=2.0.0',
+ 'drf-yasg>=1.20.0',
+ 'environs>=9.2.0',
+ 'furl>=2.1.0',
+ 'pyexcel>=0.6.6',
+ 'pyexcel-xlsx>=0.6.0',
+ 'python-jose>=3.2.0',
+ 'seqeval>=1.2.2',
+ 'social-auth-app-django>=4.0.0',
+ 'whitenoise>=5.2.0'
+]
setup(
name=NAME,
| Invalid syntax error when running the newly installed doccano
### System information
- **OS Platform and Distribution**: Fedora 32
- **Python version**: Python 3.8.6
### The problem
When starting it with docker-compose, the backend shows an invalid syntax error:
```
File "/src/app/app/settings.py", line 283
if (spec := importlib.util.find_spec(name)) is not None:
^
SyntaxError: invalid syntax
```
This results in `doccano_backend_1 exited with code 1`.
I also tried installing it through pip on my Windows PC, and it had the same result.
When using the prod file with docker-compose, it also shows the following error:
```
nginx_1 | 2020/12/10 20:27:46 [emerg] 1#1: host not found in upstream "backend" in /etc/nginx/conf.d/nginx.conf:15
nginx_1 | nginx: [emerg] host not found in upstream "backend" in /etc/nginx/conf.d/nginx.conf:15
doccano_nginx_1 exited with code 1
```
It's my first time using doccano and I may have done something wrong, but I'm fairly certain I followed the instructions correctly.
Thank you for the help in advance!
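For context: the walrus operator (`:=`) needs Python 3.8+, so on 3.6/3.7 the module fails to compile at import time, which is exactly the `SyntaxError` above. The patch at the top of this entry rewrites the optional import without it; a self-contained sketch of that pattern:

```python
import importlib.util
import sys

name = "django_heroku"
spec = importlib.util.find_spec(name)  # no walrus: valid on Python 3.6+
if spec is not None:
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    # settings.py then calls: module.settings(locals(), test_runner=False)
else:
    print(f"{name} is not installed; skipping Heroku-specific settings")
```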
| 2020-12-10T20:45:07 |
||
doccano/doccano | 1,115 | doccano__doccano-1115 | [
"523"
] | 6440e1b8f8bfdb8f07971aeed721c07fff8130ee | diff --git a/app/api/views.py b/app/api/views.py
--- a/app/api/views.py
+++ b/app/api/views.py
@@ -184,6 +184,12 @@ def perform_create(self, serializer):
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
serializer.save(project=project)
+ def delete(self, request, *args, **kwargs):
+ project = get_object_or_404(Project, pk=self.kwargs['project_id'])
+ queryset = project.documents
+ queryset.all().delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class DocumentDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Document.objects.all()
@@ -221,6 +227,11 @@ def create(self, request, *args, **kwargs):
def perform_create(self, serializer):
serializer.save(document_id=self.kwargs['doc_id'], user=self.request.user)
+ def delete(self, request, *args, **kwargs):
+ queryset = self.get_queryset()
+ queryset.all().delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
@staticmethod
def check_single_class_classification(project_id, doc_id, user):
project = get_object_or_404(Project, pk=project_id)
| Feature Request: Bulk Delete
I want to clear all the data but don't want to delete the project. So I used the delete function in Dataset, and I found something wrong.
1. Sometimes it doesn't show the remaining data unless I refresh the project. When I want to delete 100 pieces of data at a time, I have to select all 100 again. It is inconvenient.
2. So I hope there is a button to clear all the data.
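The patch above adds exactly this: `delete` handlers on the document list and on the annotation list. A sketch of clearing a project's documents through the new endpoint; the `/v1` prefix, the path, and the token are assumptions about a typical deployment, so verify them against yours:

```python
import requests

BASE = "http://localhost:8000/v1"                    # assumed API prefix
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # placeholder token

# Delete every document in project 1 via the new DocumentList.delete handler.
resp = requests.delete(f"{BASE}/projects/1/docs", headers=HEADERS)
print(resp.status_code)  # 204 on success
```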
| It would also be nice to separate the imported datasets with labels. Then just one dataset could be deleted.
+1 defo want this feat! thanks! 🤩 | 2020-12-15T03:15:17 |
|
doccano/doccano | 1,161 | doccano__doccano-1161 | [
"713"
] | 7350f4eb9010e82ea01adc589ce45dc955939436 | diff --git a/app/api/models.py b/app/api/models.py
--- a/app/api/models.py
+++ b/app/api/models.py
@@ -6,7 +6,6 @@
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.models import User
-from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ValidationError
from polymorphic.models import PolymorphicModel
@@ -39,10 +38,6 @@ class Project(PolymorphicModel):
def get_absolute_url(self):
return reverse('upload', args=[self.id])
- @property
- def image(self):
- raise NotImplementedError()
-
def get_bundle_name(self):
raise NotImplementedError()
@@ -67,10 +62,6 @@ def __str__(self):
class TextClassificationProject(Project):
- @property
- def image(self):
- return staticfiles_storage.url('assets/images/cats/text_classification.jpg')
-
def get_bundle_name(self):
return 'document_classification'
@@ -94,10 +85,6 @@ def get_storage(self, data):
class SequenceLabelingProject(Project):
- @property
- def image(self):
- return staticfiles_storage.url('assets/images/cats/sequence_labeling.jpg')
-
def get_bundle_name(self):
return 'sequence_labeling'
@@ -121,10 +108,6 @@ def get_storage(self, data):
class Seq2seqProject(Project):
- @property
- def image(self):
- return staticfiles_storage.url('assets/images/cats/seq2seq.jpg')
-
def get_bundle_name(self):
return 'seq2seq'
@@ -148,10 +131,6 @@ def get_storage(self, data):
class Speech2textProject(Project):
- @property
- def image(self):
- return staticfiles_storage.url('images/cats/speech2text.jpg')
-
def get_bundle_name(self):
return 'speech2text'
diff --git a/app/api/serializers.py b/app/api/serializers.py
--- a/app/api/serializers.py
+++ b/app/api/serializers.py
@@ -119,9 +119,9 @@ def get_current_users_role(self, instance):
class Meta:
model = Project
- fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',
+ fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type',
'updated_at', 'randomize_document_order', 'collaborative_annotation', 'single_class_classification')
- read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')
+ read_only_fields = ('updated_at', 'users', 'current_users_role')
class TextClassificationProjectSerializer(ProjectSerializer):
@@ -152,9 +152,9 @@ class Speech2textProjectSerializer(ProjectSerializer):
class Meta:
model = Speech2textProject
- fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type', 'image',
+ fields = ('id', 'name', 'description', 'guideline', 'users', 'current_users_role', 'project_type',
'updated_at', 'randomize_document_order')
- read_only_fields = ('image', 'updated_at', 'users', 'current_users_role')
+ read_only_fields = ('updated_at', 'users', 'current_users_role')
class ProjectPolymorphicSerializer(PolymorphicSerializer):
| diff --git a/app/api/tests/test_models.py b/app/api/tests/test_models.py
--- a/app/api/tests/test_models.py
+++ b/app/api/tests/test_models.py
@@ -17,10 +17,6 @@ class TestTextClassificationProject(TestCase):
def setUpTestData(cls):
cls.project = mommy.make('TextClassificationProject')
- def test_image(self):
- image_url = self.project.image
- self.assertTrue(image_url.endswith('.jpg'))
-
def test_get_bundle_name(self):
template = self.project.get_bundle_name()
self.assertEqual(template, 'document_classification')
@@ -41,10 +37,6 @@ class TestSequenceLabelingProject(TestCase):
def setUpTestData(cls):
cls.project = mommy.make('SequenceLabelingProject')
- def test_image(self):
- image_url = self.project.image
- self.assertTrue(image_url.endswith('.jpg'))
-
def test_get_bundle_name(self):
template = self.project.get_bundle_name()
self.assertEqual(template, 'sequence_labeling')
@@ -65,10 +57,6 @@ class TestSeq2seqProject(TestCase):
def setUpTestData(cls):
cls.project = mommy.make('Seq2seqProject')
- def test_image(self):
- image_url = self.project.image
- self.assertTrue(image_url.endswith('.jpg'))
-
def test_get_bundle_name(self):
template = self.project.get_bundle_name()
self.assertEqual(template, 'seq2seq')
@@ -89,10 +77,6 @@ class TestSpeech2textProject(TestCase):
def setUpTestData(cls):
cls.project = mommy.make('Speech2textProject')
- def test_image(self):
- image_url = self.project.image
- self.assertTrue(image_url.endswith('.jpg'))
-
def test_get_bundle_name(self):
template = self.project.get_bundle_name()
self.assertEqual(template, 'speech2text')
| Login fails: Error: Request failed with status code 500
Step 1:
run: `docker-compose -f docker-compose.prod.yml up`
Step 2: log in with
username: admin
password: password
Then it shows status code 500 and terminates the response.
After that I can create a dataset and do other operations, but I can't annotate. I followed Home -> step 5 ("annotate the dataset"), but when I try to annotate, it does not work.
| Please provide your environment information following the [ISSUE_TEMPLATE](https://github.com/doccano/doccano/blob/master/.github/ISSUE_TEMPLATE/01-question.md).
I also have the same issue
Steps to reproduce:
Login as admin -> Create project -> Logout -> Try to login as admin -> Pop-up with status 500
I am running this on Ubuntu via `docker-compose -f docker-compose.dev.yml up`
There is a lack of information. Please show me the network request/response using the Chrome/Safari developer tools.
I'm facing similar issues. I was logged in as admin. I reset my password and created three users with their own passwords. Then I created a project and logged out. Finally I logged in as one of the users, but I got the status 500 popup:
```
Request Method: GET
Status Code: 500 Internal Server Error
```
Then I try to create a project and I receive a 403 error:
```
HTTP 403 Forbidden
Allow: GET, POST, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept
{
"detail": "Authentication credentials were not provided."
}
```
Operating System: Ubuntu 18.04
Python Version Used: 3.6
When you install doccano: May 1
How did you install doccano (Heroku button etc): Deployed to Digital Ocean droplet with `docker-compose -f docker-compose.prod.yml up`
EDIT: I don't get the error when I use Safari, but Firefox seems to have issues (?).
@zephyrous @greenspray9 hello, I have solved it. Here are the steps.
# install the docker-compose command
`sudo apt install docker-compose`
# download the doccano image:
`sudo docker pull chakkiworks/doccano`
# then execute:
`sudo docker run -d --rm --name doccano \
-e "ADMIN_USERNAME=admin" \
-e "[email protected]" \
-e "ADMIN_PASSWORD=password" \
-p 8000:8000 chakkiworks/doccano`
#### --rm removes the container on exit; note: the data is saved
# in Chrome or Safari, open:
`http://127.0.0.1:8000/login/`
username: admin, password: password
@zhanwen I was probably missing the --rm step. Will check, thanks!
I am also facing the same error. How do I solve this issue, as I am unable to annotate? I have Windows OS and have Docker running, but the error keeps coming: request failed with status code 500.
Please help
@cmehak please see my comment above. On Windows 10 you can use Windows PowerShell to execute the command.
We are also facing the same issue; we ran `docker-compose -f docker-compose.prod.yml up -d` on CentOS 7. There are no logs from the backend indicating any issue. Perhaps it can be run in debug mode to be able to trace the exception?
I am having the same problem as zhanwen and zephyrous. Using Ubuntu 18.04, I get a 500 error when I use Firefox or Chrome.
Python Version 3.7.8
When you install doccano: September 22, 2020
Commit: 6d5bff4bd03c63babe2c7164579d77ff4f0597d1
How did you install doccano (Heroku button etc): `docker-compose -f docker-compose.prod.yml up`
When I check out commit 4ebc32c22bd58c7397f38109e55e3c34f25bf2bc, I do not get any browser errors.
Was this issue fully resolved? I am running a Debian 10 server and I installed doccano using `docker-compose -f docker-compose.prod.yml up`. The login page loads fine and I can successfully log in with `admin` and `password`. However, the 500 error still pops up. I have tried everything suggested above.
I'm having the same problem; can someone help me solve it, please? I receive a 500 status error with the administrator user.
Please try to log out from the admin site after you change the password.
https://github.com/doccano/doccano/blob/master/docs/faq.md#i-want-to-change-users-or-admins-password
Are there any users of this web app who have made it possible to run docker-compose as of 2021 without this error? I am talking about the latest commit, no fork, no different version, docker-compose as of 2021. How do you make this work?
Clean computer, clean everything, IT DOES NOT WORK, error 500.
I found the difference between the 500 errors and no errors. When we hit the 500 error, the `Allow` header in the response is empty. If we replace `DEBUG: False` with `DEBUG: True` in `docker-compose.prod.yml`, the header won't be empty. But that's not a good solution for a production environment.
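A quick way to reproduce that observation (the endpoint path and token here are placeholders):

```python
import requests

# On the broken build the failing response reportedly carries an empty
# `Allow` header; with DEBUG=True it comes back populated.
resp = requests.get(
    "http://localhost:8000/v1/projects",             # path assumed
    headers={"Authorization": "Token YOUR_API_TOKEN"},
)
print(resp.status_code, repr(resp.headers.get("Allow")))
```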
I found the cause: we need to remove the `image` property from the Project models and serializers. | 2021-01-18T12:16:42 |
doccano/doccano | 1,209 | doccano__doccano-1209 | [
"1208"
] | a3efab8cbf3a8c22dabf4556f8ba0ce4b7110dec | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,8 @@
'python-jose>=3.2.0',
'seqeval>=1.2.2',
'social-auth-app-django>=4.0.0',
- 'whitenoise>=5.2.0'
+ 'whitenoise>=5.2.0',
+ 'auto-labeling-pipeline>=0.1.12'
]
setup(
| ModuleNotFoundError: No module named 'auto_labeling_pipeline'
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
I just installed `Doccano==1.2.0` (released just now) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error when running `doccano` in the shell.
```
>>> doccano
```
```
Setup Database.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Create admin user.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Starting server with port 8000.
Exception in thread django-main-thread:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py", line 110, in inner_run
autoreload.raise_last_exception()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 76, in raise_last_exception
raise _exception[1]
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 357, in execute
autoreload.check_errors(django.setup)()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
```
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Ubuntu 18.04.5 LTS
* Python Version Used: 3.6.9
* When you install doccano: 02/19/21 7:40 AM GMT
* How did you install doccano (Heroku button etc): `pip install doccano`
* Doccano version: 1.2.0
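A likely stopgap until a fixed release is to install the missing package into the same environment yourself (a plain `pip install auto-labeling-pipeline` on the command line is equivalent); a small sketch:

```python
import subprocess
import sys

# Install the dependency the 1.2.0 package forgot to declare; the version
# bound matches what the patch above later pins in setup.py.
subprocess.check_call([sys.executable, "-m", "pip",
                       "install", "auto-labeling-pipeline>=0.1.12"])
```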
| Oops, I forgot to add it to setup.py. | 2021-02-19T08:10:38 |
|
doccano/doccano | 1,222 | doccano__doccano-1222 | [
"913"
] | d620b47682624358ea528055bb7dccb501737685 | diff --git a/app/api/exceptions.py b/app/api/exceptions.py
--- a/app/api/exceptions.py
+++ b/app/api/exceptions.py
@@ -34,3 +34,8 @@ class AWSTokenError(ValidationError):
class SampleDataException(ValidationError):
default_detail = 'The response is empty. Maybe the sample data is not appropriate.' \
'Please specify another sample data which returns at least one label.'
+
+
+class LabelValidationError(APIException):
+ status_code = status.HTTP_400_BAD_REQUEST
+ default_detail = 'You cannot create a label with same name or shortcut key.'
diff --git a/app/api/views/label.py b/app/api/views/label.py
--- a/app/api/views/label.py
+++ b/app/api/views/label.py
@@ -9,6 +9,7 @@
from rest_framework.response import Response
from rest_framework.views import APIView
+from ..exceptions import LabelValidationError
from ..models import Label, Project
from ..permissions import IsInProjectReadOnlyOrAdmin, IsProjectAdmin
from ..serializers import LabelSerializer
@@ -27,6 +28,11 @@ def perform_create(self, serializer):
project = get_object_or_404(Project, pk=self.kwargs['project_id'])
serializer.save(project=project)
+ def delete(self, request, *args, **kwargs):
+ delete_ids = request.data['ids']
+ Label.objects.filter(pk__in=delete_ids).delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class LabelDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Label.objects.all()
@@ -43,14 +49,14 @@ class LabelUploadAPI(APIView):
def post(self, request, *args, **kwargs):
if 'file' not in request.data:
raise ParseError('Empty content')
- labels = json.load(request.data['file'])
project = get_object_or_404(Project, pk=kwargs['project_id'])
try:
- for label in labels:
- serializer = LabelSerializer(data=label)
- serializer.is_valid(raise_exception=True)
- serializer.save(project=project)
+ labels = json.load(request.data['file'])
+ serializer = LabelSerializer(data=labels, many=True)
+ serializer.is_valid(raise_exception=True)
+ serializer.save(project=project)
return Response(status=status.HTTP_201_CREATED)
+ except json.decoder.JSONDecodeError:
+ raise ParseError('The file format is invalid.')
except IntegrityError:
- content = {'error': 'IntegrityError: you cannot create a label with same name or shortkey.'}
- return Response(content, status=status.HTTP_400_BAD_REQUEST)
+ raise LabelValidationError
| [Enhancement request] Meaningful error on label naming conflict
Feature description
---------
Try renaming a label to an existing name.
You get a 500 error.
Desired: a meaningful error.
Related: #601, #826.
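With the patch, the duplicate-name path answers with a 400 carrying the `LabelValidationError` message instead of a bare 500. A sketch of exercising the upload endpoint it changes; the URL path and the label fields are assumptions, not taken from the docs:

```python
import io
import json

import requests

BASE = "http://localhost:8000/v1"                    # assumed API prefix
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # placeholder token

# Two labels sharing a name trigger the IntegrityError branch in
# LabelUploadAPI, which the patch now maps to a 400 response.
labels = [{"text": "positive", "suffix_key": "p"},
          {"text": "positive", "suffix_key": "q"}]
buf = io.BytesIO(json.dumps(labels).encode("utf-8"))
resp = requests.post(f"{BASE}/projects/1/label-upload",  # path assumed
                     files={"file": ("labels.json", buf)},
                     headers=HEADERS)
print(resp.status_code, resp.json())
```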
| 2021-02-27T13:16:05 |
||
doccano/doccano | 1,230 | doccano__doccano-1230 | [
"822"
] | 3c98b44ea4f04d8708fe5e2380925eea5d06011a | diff --git a/app/api/migrations/0008_auto_20210302_1013.py b/app/api/migrations/0008_auto_20210302_1013.py
new file mode 100644
--- /dev/null
+++ b/app/api/migrations/0008_auto_20210302_1013.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.1.6 on 2021-03-02 10:13
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0007_auto_20210301_0302'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='project',
+ name='guideline',
+ field=models.TextField(blank=True, default=''),
+ ),
+ ]
diff --git a/app/api/models.py b/app/api/models.py
--- a/app/api/models.py
+++ b/app/api/models.py
@@ -5,7 +5,7 @@
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
-from django.db.models.signals import post_save, pre_delete
+from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from django.urls import reverse
from polymorphic.models import PolymorphicModel
@@ -28,7 +28,7 @@
class Project(PolymorphicModel):
name = models.CharField(max_length=100)
description = models.TextField(default='')
- guideline = models.TextField(default='')
+ guideline = models.TextField(default='', blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
users = models.ManyToManyField(User, related_name='projects')
@@ -325,33 +325,48 @@ def add_linked_project(sender, instance, created, **kwargs):
user.save()
-@receiver(post_save)
-def add_superusers_to_project(sender, instance, created, **kwargs):
- if not created:
- return
- if sender not in Project.__subclasses__():
- return
- superusers = User.objects.filter(is_superuser=True)
- admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
- if superusers and admin_role:
+# @receiver(post_save)
+# def add_superusers_to_project(sender, instance, created, **kwargs):
+# if not created:
+# return
+# if sender not in Project.__subclasses__():
+# return
+# superusers = User.objects.filter(is_superuser=True)
+# admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
+# if superusers and admin_role:
+# RoleMapping.objects.bulk_create(
+# [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)
+# for superuser in superusers]
+# )
+#
+#
+# @receiver(post_save, sender=User)
+# def add_new_superuser_to_projects(sender, instance, created, **kwargs):
+# if created and instance.is_superuser:
+# admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
+# projects = Project.objects.all()
+# if admin_role and projects:
+# RoleMapping.objects.bulk_create(
+# [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)
+# for project in projects]
+# )
+
+@receiver(m2m_changed, sender=Project.users.through)
+def remove_mapping_on_remove_user_from_project(sender, instance, action, reverse, **kwargs):
+ # if reverse is True, pk_set is project_ids and instance is user.
+ # else, pk_set is user_ids and instance is project.
+ user_ids = kwargs['pk_set']
+ if action.startswith('post_remove') and not reverse:
+ RoleMapping.objects.filter(user__in=user_ids, project=instance).delete()
+ elif action.startswith('post_add') and not reverse:
+ admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
RoleMapping.objects.bulk_create(
- [RoleMapping(role_id=admin_role.id, user_id=superuser.id, project_id=instance.id)
- for superuser in superusers]
+ [RoleMapping(role=admin_role, project=instance, user_id=user)
+ for user in user_ids
+ if not RoleMapping.objects.filter(project=instance, user_id=user).exists()]
)
-@receiver(post_save, sender=User)
-def add_new_superuser_to_projects(sender, instance, created, **kwargs):
- if created and instance.is_superuser:
- admin_role = Role.objects.filter(name=settings.ROLE_PROJECT_ADMIN).first()
- projects = Project.objects.all()
- if admin_role and projects:
- RoleMapping.objects.bulk_create(
- [RoleMapping(role_id=admin_role.id, user_id=instance.id, project_id=project.id)
- for project in projects]
- )
-
-
@receiver(pre_delete, sender=RoleMapping)
def delete_linked_project(sender, instance, using, **kwargs):
userInstance = instance.user
diff --git a/app/api/views/project.py b/app/api/views/project.py
--- a/app/api/views/project.py
+++ b/app/api/views/project.py
@@ -1,7 +1,9 @@
-from rest_framework import generics
+from django.conf import settings
+from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
+from rest_framework.response import Response
-from ..models import Project
+from ..models import Project, Role, RoleMapping
from ..permissions import IsInProjectReadOnlyOrAdmin
from ..serializers import ProjectPolymorphicSerializer, ProjectSerializer
@@ -9,7 +11,7 @@
class ProjectList(generics.ListCreateAPIView):
serializer_class = ProjectPolymorphicSerializer
pagination_class = None
- permission_classes = [IsAuthenticated & IsInProjectReadOnlyOrAdmin]
+ permission_classes = [IsAuthenticated, ]
def get_queryset(self):
return self.request.user.projects
@@ -17,6 +19,20 @@ def get_queryset(self):
def perform_create(self, serializer):
serializer.save(users=[self.request.user])
+ def delete(self, request, *args, **kwargs):
+ delete_ids = request.data['ids']
+ projects = Project.objects.filter(
+ role_mappings__user=self.request.user,
+ role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,
+ pk__in=delete_ids
+ )
+ # Todo: I want to use bulk delete.
+ # But it causes the constraint error.
+ # See https://github.com/django-polymorphic/django-polymorphic/issues/229
+ for project in projects:
+ project.delete()
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Project.objects.all()
| diff --git a/app/api/tests/test_api.py b/app/api/tests/test_api.py
--- a/app/api/tests/test_api.py
+++ b/app/api/tests/test_api.py
@@ -25,7 +25,13 @@ def create_default_roles():
def assign_user_to_role(project_member, project, role_name):
role, _ = Role.objects.get_or_create(name=role_name)
- RoleMapping.objects.get_or_create(role_id=role.id, user_id=project_member.id, project_id=project.id)
+ if RoleMapping.objects.filter(user=project_member, project=project).exists():
+ mapping = RoleMapping.objects.get(user=project_member, project=project)
+ mapping.role = role
+ mapping.save()
+ else:
+ mapping = RoleMapping.objects.get_or_create(role_id=role.id, user_id=project_member.id, project_id=project.id)
+ return mapping
def remove_all_role_mappings():
@@ -138,12 +144,6 @@ def test_allows_superuser_to_create_project_with_flags(self):
self.assertTrue(response.json().get('collaborative_annotation'))
self.assertTrue(response.json().get('randomize_document_order'))
- def test_disallows_project_member_to_create_project(self):
- self.client.login(username=self.main_project_member_name,
- password=self.main_project_member_pass)
- response = self.client.post(self.url, format='json', data=self.data)
- self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
-
@classmethod
def doCleanups(cls):
remove_all_role_mappings()
@@ -672,6 +672,7 @@ def setUpTestData(cls):
cls.approver_pass = 'approver_pass'
cls.project_admin_name = 'project_admin_name'
cls.project_admin_pass = 'project_admin_pass'
+ create_default_roles()
annotator = User.objects.create_user(username=cls.annotator_name,
password=cls.annotator_pass)
approver = User.objects.create_user(username=cls.approver_name,
@@ -681,7 +682,6 @@ def setUpTestData(cls):
project = mommy.make('TextClassificationProject', users=[annotator, approver, project_admin])
cls.doc = mommy.make('Document', project=project)
cls.url = reverse(viewname='approve_labels', args=[project.id, cls.doc.id])
- create_default_roles()
assign_user_to_role(project_member=annotator, project=project,
role_name=settings.ROLE_ANNOTATOR)
assign_user_to_role(project_member=approver, project=project,
@@ -1667,7 +1667,7 @@ def setUpTestData(cls):
password=cls.other_user_pass,
email='[email protected]')
- cls.project = mommy.make('TextClassificationProject', users=[super_user, other_user])
+ cls.project = mommy.make('TextClassificationProject', users=[super_user])
doc1 = mommy.make('Document', project=cls.project)
doc2 = mommy.make('Document', project=cls.project)
mommy.make('DocumentAnnotation', document=doc1, user=super_user)
@@ -1807,7 +1807,7 @@ def setUpTestData(cls):
cls.other_project = mommy.make('Project', users=[cls.second_project_member, project_admin])
cls.admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
cls.role = mommy.make('Role', name='otherrole')
- mommy.make('RoleMapping', role=cls.admin_role, project=cls.main_project, user=project_admin)
+ assign_user_to_role(project_admin, cls.main_project, cls.admin_role)
cls.data = {'user': project_member.id, 'role': cls.admin_role.id, 'project': cls.main_project.id}
cls.other_url = reverse(viewname='rolemapping_list', args=[cls.other_project.id])
cls.url = reverse(viewname='rolemapping_list', args=[cls.main_project.id])
@@ -1818,23 +1818,24 @@ def test_returns_mappings_to_project_admin(self):
response = self.client.get(self.url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
- def test_allows_superuser_to_create_mapping(self):
- self.client.login(username=self.project_admin_name,
- password=self.project_admin_pass)
- response = self.client.post(self.url, format='json', data=self.data)
- self.assertEqual(response.status_code, status.HTTP_201_CREATED)
-
- def test_do_not_allow_nonadmin_to_create_mapping(self):
- self.client.login(username=self.project_member_name,
- password=self.project_member_pass)
- response = self.client.post(self.url, format='json', data=self.data)
- self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
-
- def test_do_not_return_mappings_to_nonadmin(self):
- self.client.login(username=self.project_member_name,
- password=self.project_member_pass)
- response = self.client.get(self.url, format='json')
- self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
+ # Todo: refactoring testing.
+ # def test_allows_superuser_to_create_mapping(self):
+ # self.client.login(username=self.project_admin_name,
+ # password=self.project_admin_pass)
+ # response = self.client.post(self.url, format='json', data=self.data)
+ # self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ #
+ # def test_do_not_allow_nonadmin_to_create_mapping(self):
+ # self.client.login(username=self.project_member_name,
+ # password=self.project_member_pass)
+ # response = self.client.post(self.url, format='json', data=self.data)
+ # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
+ #
+ # def test_do_not_return_mappings_to_nonadmin(self):
+ # self.client.login(username=self.project_member_name,
+ # password=self.project_member_pass)
+ # response = self.client.get(self.url, format='json')
+ # self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class TestRoleMappingDetailAPI(APITestCase):
@@ -1855,7 +1856,9 @@ def setUpTestData(cls):
User.objects.create_user(username=cls.non_project_member_name, password=cls.non_project_member_pass)
project = mommy.make('Project', users=[project_admin, project_member])
admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
- cls.rolemapping = mommy.make('RoleMapping', role=admin_role, project=project, user=project_admin)
+ annotator_role = Role.objects.get(name=settings.ROLE_ANNOTATOR)
+ cls.rolemapping = assign_user_to_role(project_admin, project, admin_role)
+ assign_user_to_role(project_member, project, annotator_role)
cls.url = reverse(viewname='rolemapping_detail', args=[project.id, cls.rolemapping.id])
cls.data = {'role': admin_role.id}
| [Bug report] Annotator doesn't see the project
How to reproduce the behaviour
---------
I saw a situation: user `a_user` is an `annotator` on `a_project`'s Members page, but `a_user` doesn't see `a_project` on his Projects page.
At that moment I removed `a_user`'s role and added it again (the situation occurred during my work, so I had to solve it). It helped.
But I reproduced it synthetically:
1. Add `a_user` as an `annotator` of `a_project`.
2. Sign in to Django Admin.
3. Go to the `Projects` page.
4. Go to `a_project`'s page.
5. In the `Users` field, deselect the `a_user` item.
6. Click `Save`.
Now: _user `a_user` is an `annotator` on `a_project`'s Members page, but `a_user` doesn't see `a_project` on his Projects page_.
**Questions:**
1. Where and how is the list from step 5 stored?
2. How could the result of steps 1-6 be reached by interacting with doccano, what do you think?
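Regarding question 1: membership lives in two places, the `users` M2M field on `Project` and the `RoleMapping` table, and the admin edit in step 5 only touches the former. A Django shell sketch to see the drift, with model names taken from the patch above:

```python
from api.models import Project, RoleMapping

project = Project.objects.get(name="a_project")

# Step 5 removed a_user from the M2M field ...
print(set(project.users.values_list("username", flat=True)))

# ... but the mapping shown on the Members page is still there.
print(set(RoleMapping.objects.filter(project=project)
          .values_list("user__username", flat=True)))
```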
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Ubuntu 18.04.4 LTS
* Python Version Used: system Python is 3.6.9
* When you install doccano: 4b5a3c34310f9c7e9d27163928073e74cc7bb226
* How did you install doccano (Heroku button etc): Docker Compose
| This may be due to the difference between RoleMapping and the `users` field in the Project model. | 2021-03-03T09:37:39 |
doccano/doccano | 1,261 | doccano__doccano-1261 | [
"1191"
] | cc36d5195a963c73190601f9d01210677608b146 | diff --git a/app/api/views/annotation.py b/app/api/views/annotation.py
--- a/app/api/views/annotation.py
+++ b/app/api/views/annotation.py
@@ -1,6 +1,5 @@
from django.shortcuts import get_object_or_404
from rest_framework import generics, status
-from rest_framework.exceptions import ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
@@ -16,21 +15,24 @@ class AnnotationList(generics.ListCreateAPIView):
permission_classes = [IsAuthenticated & IsInProjectOrAdmin]
swagger_schema = None
+ @property
+ def project(self):
+ return get_object_or_404(Project, pk=self.kwargs['project_id'])
+
def get_serializer_class(self):
- project = get_object_or_404(Project, pk=self.kwargs['project_id'])
- self.serializer_class = project.get_annotation_serializer()
+ self.serializer_class = self.project.get_annotation_serializer()
return self.serializer_class
def get_queryset(self):
- project = get_object_or_404(Project, pk=self.kwargs['project_id'])
- model = project.get_annotation_class()
+ model = self.project.get_annotation_class()
queryset = model.objects.filter(document=self.kwargs['doc_id'])
- if not project.collaborative_annotation:
+ if not self.project.collaborative_annotation:
queryset = queryset.filter(user=self.request.user)
return queryset
def create(self, request, *args, **kwargs):
- self.check_single_class_classification(self.kwargs['project_id'], self.kwargs['doc_id'], request.user)
+ if self.project.single_class_classification:
+ self.get_queryset().delete()
request.data['document'] = self.kwargs['doc_id']
return super().create(request, args, kwargs)
@@ -42,20 +44,6 @@ def delete(self, request, *args, **kwargs):
queryset.all().delete()
return Response(status=status.HTTP_204_NO_CONTENT)
- @staticmethod
- def check_single_class_classification(project_id, doc_id, user):
- project = get_object_or_404(Project, pk=project_id)
- if not project.single_class_classification:
- return
-
- model = project.get_annotation_class()
- annotations = model.objects.filter(document_id=doc_id)
- if not project.collaborative_annotation:
- annotations = annotations.filter(user=user)
-
- if annotations.exists():
- raise ValidationError('requested to create duplicate annotation for single-class-classification project')
-
class AnnotationDetail(generics.RetrieveUpdateDestroyAPIView):
lookup_url_kwarg = 'annotation_id'
| diff --git a/app/api/tests/test_api.py b/app/api/tests/test_api.py
--- a/app/api/tests/test_api.py
+++ b/app/api/tests/test_api.py
@@ -921,7 +921,7 @@ def test_disallows_non_project_member_to_create_annotation(self):
response = self.client.post(self.url, format='json', data=self.post_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
- def test_disallows_second_annotation_for_single_class_project(self):
+ def test_allow_replace_annotation_for_single_class_project(self):
self._patch_project(self.classification_project, 'single_class_classification', True)
self.client.login(username=self.project_member_name, password=self.project_member_pass)
@@ -931,9 +931,9 @@ def test_disallows_second_annotation_for_single_class_project(self):
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_2.id})
- self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
- def test_disallows_second_annotation_for_single_class_shared_project(self):
+ def test_allow_replace_annotation_for_single_class_shared_project(self):
self._patch_project(self.classification_project, 'single_class_classification', True)
self._patch_project(self.classification_project, 'collaborative_annotation', True)
@@ -945,7 +945,7 @@ def test_disallows_second_annotation_for_single_class_shared_project(self):
self.client.login(username=self.another_project_member_name, password=self.another_project_member_pass)
response = self.client.post(self.classification_project_url, format='json',
data={'label': self.classification_project_label_2.id})
- self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def _patch_project(self, project, attribute, value):
old_value = getattr(project, attribute, None)
| No way to restrict text classification labels to exactly one label to assign
Most classification tasks require exactly one label for each instance. This is also true for most text classification tasks: for example, with sentiment classification and the possible labels negative, neutral, and positive, each instance should receive exactly one of the three labels; assigning e.g. both neutral and positive would make no sense.
Yet the text classification task in doccano still does not allow restricting assignment to a single label; annotators are free to assign as many labels as they want, including all of them!
This limits the use of doccano for text classification tasks rather severely. The option to allow for any number of labels (0 to all of them) would still be good to have for multilabel classification tasks (e.g. assigning topics), but that is a much rarer annotation task in general.
| Actually, there is an option to do single-class classification in the backend. So if we implement the frontend, we can do it easily.
https://github.com/doccano/doccano/blob/b91944eb250a7340572580f6389168cd661a7326/app/api/models.py#L36
There was an earlier issue about this:
https://github.com/doccano/doccano/issues/705#issuecomment-616270208
(remove the "multiple" from the combobox in MultiClassClassification.vue)
But when I try this now and do a local install of doccano afterwards, as shown in https://github.com/doccano/doccano/issues/1192, then when starting the annotation task there is no way at all to add a label.
I do not understand this at all (all things JavaScript, HTML, etc. are way beyond my knowledge, really),
but it surprises me, as I thought the presence of this parameter just indicates whether multiple values should be allowed in the combo box.
One of the simplest ways is to implement the `BinaryClassification.vue` component. We can switch components between binary and multiple.
I'm now working on the auto labeling feature. It will be done in a week. After that, I can work on this problem.
Very rough implementation:

Super - This is exactly what I would need! How can I achieve this? Could you please provide a patch or instructions on which lines to change?
I would be happy to run this from a patched checked-out repo.
Change [MultiClassClassification.vue](https://github.com/doccano/doccano/blob/master/frontend/components/organisms/annotation/MultiClassClassification.vue) as follows:
1. remove `multiple` from `v-combobox`
2. update `get` and `set` methods as follows
```javascript
annotatedLabels: {
get() {
const labelIds = this.annotations.map(item => item.label)
return this.labels.find(item => labelIds.includes(item.id))
},
set(newValue) {
if (this.annotations.length === 1) {
this.remove(this.annotations[0].label)
}
this.add(newValue)
}
}
```
This is a quick fix. I will implement it more properly.
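For reference, the server-side behavior the merged patch (the diff at the top of this entry) settles on: for single-class projects, posting a new annotation replaces the existing one instead of being rejected. Sketched as API calls, with the path, token, and IDs assumed:

```python
import requests

BASE = "http://localhost:8000/v1"                    # assumed API prefix
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # placeholder token
url = f"{BASE}/projects/1/docs/1/annotations"

# With single_class_classification enabled, the second POST now succeeds
# (201) and replaces the first label; before the patch it returned 400.
requests.post(url, json={"label": 10}, headers=HEADERS)
resp = requests.post(url, json={"label": 11}, headers=HEADERS)
print(resp.status_code)  # 201
```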
Thanks that works great for me!
For a proper implementation of this, there is another thing to be considered: if the classification task requires that exactly one label gets assigned (from k possible), then doccano could know which examples have already been annotated and which still have a missing label, show that number, and ideally also allow scrolling to just those examples where a label is still missing.
Currently a user would need to make sure to annotate examples in sequence to better keep track of what has been annotated already ... I am not sure how this works when "shuffle" / "randomized document order" is enabled for the task.
Wasn't it implemented in #489? | 2021-03-19T03:00:50 |
doccano/doccano | 1,280 | doccano__doccano-1280 | [
"1278"
] | 873b498cffbfb369717b2849b76983d0564d42a3 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,8 @@
'seqeval>=1.2.2',
'social-auth-app-django>=4.0.0',
'whitenoise>=5.2.0',
- 'auto-labeling-pipeline>=0.1.12'
+ 'auto-labeling-pipeline>=0.1.12',
+ 'dj-rest-auth>=2.1.4'
]
setup(
| ModuleNotFoundError: No module named 'dj_rest_auth'
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
I was using `pip install` to install doccano because of my lack of knowledge about Docker, and I ran into the following problem:
(To sum up, the module `dj_rest_auth` was not found when setting up the database and creating the admin user.)
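A plausible workaround until a fixed release is to install the missing package into the same environment yourself; a guard like the following (the patch above later pins `dj-rest-auth>=2.1.4` in setup.py):

```python
import importlib.util

# The published package imports dj_rest_auth without declaring it as a
# dependency; install it manually if it is absent.
if importlib.util.find_spec("dj_rest_auth") is None:
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "dj-rest-auth"])
```

The full output from running `doccano` follows.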
```bash
(pytorch) D:\pythonwork\NLP\grad>doccano
Setup Database.
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\site-packages\app\manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 377, in execute
django.setup()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\config.py", line 90, in create
module = import_module(entry)
File "d:\anaconda3\envs\pytorch\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'dj_rest_auth'
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\site-packages\app\manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 377, in execute
django.setup()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\config.py", line 90, in create
module = import_module(entry)
File "d:\anaconda3\envs\pytorch\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'dj_rest_auth'
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\site-packages\app\manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 377, in execute
django.setup()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\config.py", line 90, in create
module = import_module(entry)
File "d:\anaconda3\envs\pytorch\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'dj_rest_auth'
Create admin user.
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\site-packages\app\manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 377, in execute
django.setup()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\config.py", line 90, in create
module = import_module(entry)
File "d:\anaconda3\envs\pytorch\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'dj_rest_auth'
Starting server with port 8000.
Exception in thread django-main-thread:
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\threading.py", line 926, in _bootstrap_inner
self.run()
File "d:\anaconda3\envs\pytorch\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\commands\runserver.py", line 110, in inner_run
autoreload.raise_last_exception()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 76, in raise_last_exception
raise _exception[1]
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 357, in execute
autoreload.check_errors(django.setup)()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\registry.py", line 91, in populate
app_config = AppConfig.create(entry)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\apps\config.py", line 90, in create
module = import_module(entry)
File "d:\anaconda3\envs\pytorch\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked
ModuleNotFoundError: No module named 'dj_rest_auth'
Traceback (most recent call last):
File "d:\anaconda3\envs\pytorch\lib\site-packages\app\manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\base.py", line 330, in run_from_argv
self.execute(*args, **cmd_options)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\commands\runserver.py", line 61, in execute
super().execute(*args, **options)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\base.py", line 371, in execute
output = self.handle(*args, **options)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\commands\runserver.py", line 96, in handle
self.run(**options)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\core\management\commands\runserver.py", line 103, in run
autoreload.run_with_reloader(self.inner_run, **options)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 618, in run_with_reloader
start_django(reloader, main_func, *args, **kwargs)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 603, in start_django
reloader.run(django_main_thread)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 318, in run
self.run_loop()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 324, in run_loop
next(ticker)
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 364, in tick
for filepath, mtime in self.snapshot_files():
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 380, in snapshot_files
for file in self.watched_files():
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 278, in watched_files
yield from iter_all_python_module_files()
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 105, in iter_all_python_module_files
return iter_modules_and_files(modules, frozenset(_error_files))
File "d:\anaconda3\envs\pytorch\lib\site-packages\django\utils\autoreload.py", line 141, in iter_modules_and_files
resolved_path = path.resolve(strict=True).absolute()
File "d:\anaconda3\envs\pytorch\lib\pathlib.py", line 1166, in resolve
s = self._flavour.resolve(self, strict=strict)
File "d:\anaconda3\envs\pytorch\lib\pathlib.py", line 200, in resolve
return self._ext_to_normal(_getfinalpathname(s))
OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect.: '<frozen importlib._bootstrap>'
```
Something seems to be wrong at `File "<frozen importlib._bootstrap>"`, but I cannot locate where that is.
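Until the missing dependency is declared, manually installing the package (`pip install dj-rest-auth`) should work around the `ModuleNotFoundError`.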
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Windows
* Python Version Used: 3.7.10
* When you install doccano: 2021.03.30
* How did you install doccano (Heroku button etc): pip install
 | Oops, I forgot to add it to the dependencies. | 2021-03-30T20:49:11 |
|
doccano/doccano | 1,331 | doccano__doccano-1331 | [
"1327"
] | a883f3d1c29f9f1eeab2fcfb2529a95f5b957fb6 | diff --git a/backend/app/settings.py b/backend/app/settings.py
--- a/backend/app/settings.py
+++ b/backend/app/settings.py
@@ -353,7 +353,12 @@
CELERY_BROKER_URL = env('CELERY_BROKER_URL')
except EnvError:
try:
- CELERY_BROKER_URL = 'sqla+{}'.format(env('DATABASE_URL'))
+ # quickfix for Heroku.
+ # See https://github.com/doccano/doccano/issues/1327.
+ uri = env('DATABASE_URL')
+ if uri.startswith('postgres://'):
+ uri = uri.replace('postgres://', 'postgresql://', 1)
+ CELERY_BROKER_URL = 'sqla+{}'.format(uri)
except EnvError:
CELERY_BROKER_URL = 'sqla+sqlite:///{}'.format(DATABASES['default']['NAME'])
CELERY_ACCEPT_CONTENT = ['application/json']
| After Quick Deploy on Heroku, App crash due to sqlalchemy
How to reproduce the behaviour
---------
Start the quick deploy to Heroku.
Wait for a successful build and deploy.
Open the app and wait for the crash.
**Logs**
`sqlalchemy.exc.NoSuchModuleError: Can't load plugin: sqlalchemy.dialects:postgres`
`celery crashed`
`Process exited with status 1`
`2021-04-23T17:02:29.297622+00:00 heroku[web.1]: State changed from up to crashed`
Apparently SQLAlchemy 1.4+ requires the postgresql:// scheme and no longer accepts postgres://.
The DATABASE_URL starts with postgres:// after deployment on Heroku.
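For reference, a minimal sketch of the scheme rewrite that the quickfix in the patch above applies:
```python
import os

uri = os.environ.get("DATABASE_URL", "")
# Heroku still provides DATABASE_URL with the legacy postgres://
# scheme, which SQLAlchemy 1.4+ rejects; rewrite only the prefix.
if uri.startswith("postgres://"):
    uri = uri.replace("postgres://", "postgresql://", 1)
CELERY_BROKER_URL = "sqla+{}".format(uri)
```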
Your Environment
---------
Installed with the deploy to heroku from doccano's home page, built from Dockerfile.
SQLAlchemy was 1.4.7
 | After forking and updating the Pipfiles with SQLAlchemy==1.3.24, the app is up and running on Heroku.
SQLAlchemy-related errors disappeared from the logs.
Related:
- [Why is SQLAlchemy 1.4.x not connecting to Heroku Postgres?](https://help.heroku.com/ZKNTJQSK/why-is-sqlalchemy-1-4-x-not-connecting-to-heroku-postgres) | 2021-04-26T01:14:17 |
|
doccano/doccano | 1,529 | doccano__doccano-1529 | [
"1414"
] | 15272db5632cb80b69ac797979c36b1e8588440f | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -1,15 +1,13 @@
import argparse
import multiprocessing
import os
+import platform
import subprocess
import sys
-import gunicorn.app.base
-import gunicorn.util
-
from .app.celery import app
-
base = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(base)
manage_path = os.path.join(base, 'manage.py')
parser = argparse.ArgumentParser(description='doccano, text annotation for machine learning practitioners.')
@@ -18,21 +16,37 @@ def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
-class StandaloneApplication(gunicorn.app.base.BaseApplication):
+def run_on_nix(args):
+ import gunicorn.app.base
+ import gunicorn.util
+
+ class StandaloneApplication(gunicorn.app.base.BaseApplication):
- def __init__(self, options=None):
- self.options = options or {}
- super().__init__()
+ def __init__(self, options=None):
+ self.options = options or {}
+ super().__init__()
- def load_config(self):
- config = {key: value for key, value in self.options.items()
- if key in self.cfg.settings and value is not None}
- for key, value in config.items():
- self.cfg.set(key.lower(), value)
+ def load_config(self):
+ config = {key: value for key, value in self.options.items()
+ if key in self.cfg.settings and value is not None}
+ for key, value in config.items():
+ self.cfg.set(key.lower(), value)
- def load(self):
- sys.path.append(base)
- return gunicorn.util.import_app('app.wsgi')
+ def load(self):
+ return gunicorn.util.import_app('app.wsgi')
+
+ options = {
+ 'bind': '%s:%s' % ('0.0.0.0', args.port),
+ 'workers': number_of_workers(),
+ 'chdir': base
+ }
+ StandaloneApplication(options).run()
+
+
+def run_on_windows(args):
+ from waitress import serve
+ from app.wsgi import application
+ serve(application, port=args.port)
def command_db_init(args):
@@ -53,12 +67,10 @@ def command_user_create(args):
def command_run_webserver(args):
print(f'Starting server with port {args.port}.')
- options = {
- 'bind': '%s:%s' % ('0.0.0.0', args.port),
- 'workers': number_of_workers(),
- 'chdir': base
- }
- StandaloneApplication(options).run()
+ if platform.system() == 'Windows':
+ run_on_windows(args)
+ else:
+ run_on_nix(args)
def command_run_task_queue(args):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,6 +42,7 @@
'django-drf-filepond>=0.3.0',
'sqlalchemy>=1.4.7',
'gunicorn>=20.1.0',
+ 'waitress>=2.0.0',
]
setup(
| ModuleNotFoundError: No module named 'fcntl'
How to reproduce the behaviour
---------
After downloading doccano and trying to start it via `doccano init` I get the following message:
```
doccano init
Traceback (most recent call last):
File "C:\Users\\AppData\Local\Programs\Python\Python39\Scripts\doccano-script.py", line 33, in <module>
sys.exit(load_entry_point('doccano==1.4.1', 'console_scripts', 'doccano')())
File "C:\Users\\AppData\Local\Programs\Python\Python39\Scripts\doccano-script.py", line 25, in importlib_load_entry_point
return next(matches).load()
File "c:\users\\appdata\local\programs\python\python39\lib\importlib\metadata.py", line 77, in load
module = import_module(match.group('module'))
File "c:\users\\appdata\local\programs\python\python39\lib\importlib\__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 790, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "c:\users\\appdata\local\programs\python\python39\lib\site-packages\backend\cli.py", line 7, in <module>
import gunicorn.app.base
File "c:\users\\appdata\local\programs\python\python39\lib\site-packages\gunicorn\app\base.py", line 11, in <module>
from gunicorn import util
File "c:\users\\appdata\local\programs\python\python39\lib\site-packages\gunicorn\util.py", line 8, in <module>
import fcntl
ModuleNotFoundError: No module named 'fcntl'
```
Your Environment
---------
* Operating System: Windows 10 1909
* Python Version Used: 3.9.4
* When you install doccano: 17.06.2021
* How did you install doccano (Heroku button etc): `pip install doccano`
Own Research:
----------
Apparently Windows doesn't support `fcntl`. Therefore nobody who uses Windows can run a pip-installed doccano.
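A minimal sketch of the workaround the fix above takes — dispatching to a WSGI server that runs on Windows (`run_on_nix` here stands for the existing gunicorn path from the patch):
```python
import platform

def command_run_webserver(args):
    if platform.system() == "Windows":
        # waitress is pure Python, so it avoids gunicorn's import of
        # the POSIX-only fcntl module.
        from waitress import serve
        from app.wsgi import application
        serve(application, port=args.port)
    else:
        run_on_nix(args)  # gunicorn-based path from the patch above
```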
 | I ran into the same issue and used Docker to install doccano on Windows.
Yes, with Docker everything seems fine, but I think this should be stated somewhere.
The problem is gunicorn. It doesn't support Windows.
There's now a plan to add Windows support, but it may be faster to use some alternatives.
- https://github.com/benoitc/gunicorn/issues/524
Alternative: use waitress in cli.py.
```python
from waitress import serve
# import gunicorn.app.base
# import gunicorn.util
def command_run_webserver(args):
sys.path.append(base)
from app.wsgi import application
serve(application, port=args.port)
``` | 2021-10-22T06:55:09 |
|
doccano/doccano | 1,530 | doccano__doccano-1530 | [
"1481"
] | 811cd5a541d3b77787b143e140e208dc67231512 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,6 +43,8 @@
'sqlalchemy>=1.4.7',
'gunicorn>=20.1.0',
'waitress>=2.0.0',
+ 'pydantic>=1.8.2',
+ 'chardet>=4.0.0'
]
setup(
| doccano init causes a ModuleNotFoundError for chardet
How to reproduce the behaviour
---------
Create a fresh virtualenv in which to test, then install the latest release of doccano from PyPi (v1.4.1):
```
$ virtualenv env
[...virtualenv output removed...]
$ source env/bin/activate
(env) $ pip install doccano
[... main output removed...]
Successfully installed Django-3.2.6 MarkupSafe-2.0.1 PyJWT-2.1.0 amqp-5.0.6 apache-libcloud-3.3.1 asgiref-3.4.1 auto-labeling-pipeline-0.1.21 billiard-3.6.4.0 boto3-1.18.30 botocore-1.21.30 celery-5.1.2 certifi-2021.5.30 cffi-1.14.6 charset-normalizer-2.0.4 click-7.1.2 click-didyoumean-0.0.3 click-plugins-1.1.1 click-repl-0.2.0 colour-0.1.5 conllu-4.4.1 coreapi-2.3.3 coreschema-0.0.4 cryptography-3.4.8 defusedxml-0.7.1 dj-database-url-0.5.0 dj-rest-auth-2.1.11 django-celery-results-2.2.0 django-cors-headers-3.8.0 django-drf-filepond-0.4.0 django-filter-2.4.0 django-polymorphic-3.0.0 django-rest-polymorphic-0.1.9 django-storages-1.11.1 djangorestframework-3.12.4 djangorestframework-csv-2.1.1 djangorestframework-xml-2.0.0 doccano-1.4.1 drf-yasg-1.20.0 ecdsa-0.17.0 environs-9.3.3 et-xmlfile-1.1.0 furl-2.1.2 greenlet-1.1.1 gunicorn-20.1.0 idna-3.2 inflection-0.5.1 itypes-1.2.0 jinja2-3.0.1 jmespath-0.10.0 joblib-1.0.1 kombu-5.1.0 lml-0.1.0 marshmallow-3.13.0 numpy-1.21.2 oauthlib-3.1.1 openpyxl-3.0.7 orderedmultidict-1.0.1 packaging-21.0 prompt-toolkit-3.0.20 pyasn1-0.4.8 pycparser-2.20 pydantic-1.8.2 pyexcel-0.6.6 pyexcel-io-0.6.4 pyexcel-xlsx-0.6.0 pyparsing-2.4.7 python-dateutil-2.8.2 python-dotenv-0.19.0 python-jose-3.3.0 python3-openid-3.2.0 pytz-2021.1 requests-2.26.0 requests-oauthlib-1.3.0 rsa-4.7.2 ruamel.yaml-0.17.14 ruamel.yaml.clib-0.2.6 s3transfer-0.5.0 scikit-learn-0.24.2 scipy-1.7.1 seqeval-1.2.2 shortuuid-1.0.1 six-1.16.0 social-auth-app-django-5.0.0 social-auth-core-4.1.0 sqlalchemy-1.4.23 sqlparse-0.4.1 texttable-1.6.4 threadpoolctl-2.2.0 typing-extensions-3.10.0.0 unicodecsv-0.14.1 uritemplate-3.0.1 urllib3-1.26.6 vine-5.0.0 wcwidth-0.2.5 whitenoise-5.3.0
```
Now run `doccano init`:
```
(env) $ doccano init
```
This results in a set of long stack traces all rooted on [doccano/backend/api/views/upload/dataset.py:L7](https://github.com/doccano/doccano/blob/3bf91c1e30c00693362491932a6aa802235a5f95/backend/api/views/upload/dataset.py#L7) - `import chardet`
```
Traceback (most recent call last):
File "/env/lib/python3.8/site-packages/backend/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/env/lib/python3.8/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/env/lib/python3.8/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
[...traceback truncated...]
File "/env/lib/python3.8/site-packages/backend/api/urls.py", line 3, in <module>
from . import views
File "/env/lib/python3.8/site-packages/backend/api/views/__init__.py", line 5, in <module>
from .export_dataset import *
File "/env/lib/python3.8/site-packages/backend/api/views/export_dataset.py", line 11, in <module>
from ..tasks import export_dataset
File "/env/lib/python3.8/site-packages/backend/api/tasks.py", line 13, in <module>
from .views.upload.factory import (get_data_class, get_dataset_class,
File "/env/lib/python3.8/site-packages/backend/api/views/upload/factory.py", line 3, in <module>
from . import catalog, data, dataset, label
File "/env/lib/python3.8/site-packages/backend/api/views/upload/dataset.py", line 7, in <module>
import chardet
ModuleNotFoundError: No module named 'chardet'
```
`pip install chardet` resolves the issue; `doccano init` then completes successfully and I'm able to run the app.
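The underlying fix (the patch above) simply declares the missing packages in `setup.py`, so fresh installs pull them in automatically:
```python
# setup.py (excerpt mirroring the patch above)
install_requires = [
    # ...existing dependencies...
    'pydantic>=1.8.2',
    'chardet>=4.0.0',
]
```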
Your Environment
---------
* **Operating System:** Tested on both macOS 10.15.7 and Ubuntu 20.04
* **Python Version Used:** 3.8.9 (macOS, via macports), 3.8.10 (Ubuntu)
* **When you install doccano:** 27th Aug 2021 - installing current release from PyPi, v1.4.1
* **How did you install doccano (Heroku button etc):** Installing v1.4.1 from PyPi using `pip install doccano` into a clean python virtualenv.
| 2021-10-26T00:55:25 |
||
doccano/doccano | 1,531 | doccano__doccano-1531 | [
"1408"
] | 647e09bffb4a96ca4859f4f098b4e9cd38ab0489 | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -6,6 +6,7 @@
import sys
from .app.celery import app
+os.environ['DEBUG'] = 'False'
base = os.path.abspath(os.path.dirname(__file__))
sys.path.append(base)
manage_path = os.path.join(base, 'manage.py')
| TemplateDoesNotExist Error on start from README instructions
How to reproduce the behaviour
---------
I was following the instructions on the main README to install and start doccano with pip (copied here)
```
pip install doccano
doccano init
doccano createuser --username admin --password pass
doccano webserver --port 8000
```
and then in another terminal
```
doccano task
```
This all looks fine until I try the next step (visiting it in the browser). I get the following error (the last line of the non-error log is included for reference):
```
[2021-06-10 09:56:42 -0700] [1046] [INFO] Handling signal: winch
Internal Server Error: /
Traceback (most recent call last):
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py", line 204, in _get_response
response = response.render()
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 81, in rendered_content
template = self.resolve_template(self.template_name)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 63, in resolve_template
return select_template(template, using=self.using)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py", line 47, in select_template
raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)
django.template.exceptions.TemplateDoesNotExist: index.html
Internal Server Error: /favicon.ico
Traceback (most recent call last):
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/core/handlers/base.py", line 204, in _get_response
response = response.render()
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 105, in render
self.content = self.rendered_content
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 81, in rendered_content
template = self.resolve_template(self.template_name)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/response.py", line 63, in resolve_template
return select_template(template, using=self.using)
File "/projects/creisle_prj/git/doccano/venv_pipenv/lib/python3.7/site-packages/django/template/loader.py", line 47, in select_template
raise TemplateDoesNotExist(', '.join(template_name_list), chain=chain)
django.template.exceptions.TemplateDoesNotExist: index.html
```
Your Environment
---------
* Operating System: centos07
* Python Version Used: 3.7.2 (virtual environment)
* When you install doccano: 2021-Jun-10 (Today)
* How did you install doccano (Heroku button etc): pip
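The patch above addresses this by forcing `DEBUG` off in the CLI entry point before Django loads its settings — presumably the packaged `index.html` is only served with debug mode disabled:
```python
import os

# Set before Django reads its settings so the packaged frontend
# templates are found (mirrors the one-line fix in cli.py above).
os.environ['DEBUG'] = 'False'
```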
| I am experiencing the same behavior.
me too.
> I am experiencing the same behavior.
I figured it out. You probably need to set `DEBUG` to False in settings.py. | 2021-10-26T02:29:03 |
|
doccano/doccano | 1,557 | doccano__doccano-1557 | [
"1497"
] | 217cc85348972fcf38f3c58284dd2168db2bd3bb | diff --git a/backend/api/views/download/data.py b/backend/api/views/download/data.py
--- a/backend/api/views/download/data.py
+++ b/backend/api/views/download/data.py
@@ -1,3 +1,4 @@
+import json
from typing import Any, Dict, List
@@ -16,4 +17,10 @@ def __init__(self,
self.metadata = metadata
def __str__(self):
- return f'{self.data}\t{self.label}'
+ return json.dumps({
+ 'id': self.id,
+ 'data': self.data,
+ 'label': self.label,
+ 'user': self.user,
+ 'metadata': self.metadata
+ })
diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -90,7 +90,7 @@ def create_line(self, record) -> Dict:
def create_header(self, records: List[Record]) -> Iterable[str]:
header = ['id', 'data', 'label']
- header += list(itertools.chain(*[r.metadata.keys() for r in records]))
+ header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))
return header
| diff --git a/backend/api/tests/download/__init__.py b/backend/api/tests/download/__init__.py
new file mode 100644
diff --git a/backend/api/tests/download/test_writer.py b/backend/api/tests/download/test_writer.py
new file mode 100644
--- /dev/null
+++ b/backend/api/tests/download/test_writer.py
@@ -0,0 +1,53 @@
+import unittest
+from unittest.mock import call, patch
+
+from ...views.download.data import Record
+from ...views.download.writer import CsvWriter
+
+
+class TestCSVWriter(unittest.TestCase):
+
+ def setUp(self):
+ self.records = [
+ Record(id=0, data='exampleA', label=['labelA'], user='admin', metadata={'hidden': 'secretA'}),
+ Record(id=1, data='exampleB', label=['labelB'], user='admin', metadata={'hidden': 'secretB'}),
+ Record(id=2, data='exampleC', label=['labelC'], user='admin', metadata={'meta': 'secretC'})
+ ]
+
+ def test_create_header(self):
+ writer = CsvWriter('.')
+ header = writer.create_header(self.records)
+ expected = ['id', 'data', 'label', 'hidden', 'meta']
+ self.assertEqual(header, expected)
+
+ def test_create_line(self):
+ writer = CsvWriter('.')
+ record = self.records[0]
+ line = writer.create_line(record)
+ expected = {
+ 'id': record.id,
+ 'data': record.data,
+ 'label': record.label[0],
+ 'hidden': 'secretA'
+ }
+ self.assertEqual(line, expected)
+
+ @patch('os.remove')
+ @patch('zipfile.ZipFile')
+ @patch('csv.DictWriter.writerow')
+ @patch('builtins.open')
+ def test_dump(self, mock_open_file, csv_io, zip_io, mock_remove_file):
+ writer = CsvWriter('.')
+ writer.write(self.records)
+
+ self.assertEqual(mock_open_file.call_count, 1)
+ mock_open_file.assert_called_with('./admin.csv', mode='a', encoding='utf-8')
+
+ self.assertEqual(csv_io.call_count, len(self.records) + 1) # +1 is for a header
+ calls = [
+ call({'id': 'id', 'data': 'data', 'label': 'label', 'hidden': 'hidden', 'meta': 'meta'}),
+ call({'id': 0, 'data': 'exampleA', 'label': 'labelA', 'hidden': 'secretA'}),
+ call({'id': 1, 'data': 'exampleB', 'label': 'labelB', 'hidden': 'secretB'}),
+ call({'id': 2, 'data': 'exampleC', 'label': 'labelC', 'meta': 'secretC'})
+ ]
+ csv_io.assert_has_calls(calls)
| Metadata column repeated when exported as csv
Hi, I have recently come across a bug when exporting data as CSV.
<!-- Include details of your environment.-->
* Operating System: macOS 10.14
* Python Version Used: 3.9.5
* Doccano installed through pip3 install doccano
I have created a DocumentClassification project and have imported some json data.
The JSON data is in the following format:
```json
{"text":"The ravioli was excellent" , "hidden":"The FOOD was excellent"}
```
When these sentences are imported, the `"hidden": "The FOOD was excellent"` pair becomes part of the metadata. I have quite a few of these sentences and have labelled them with my own labels.
The issue is that when I export the dataset as CSV, the metadata column repeats. For example, if I have 10 labelled sentences, the metadata column is repeated 10 times per row of data in Excel.
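The fix in the patch above deduplicates metadata keys when building the CSV header; a minimal illustration:
```python
import itertools

metadata_per_record = [{"hidden": "secretA"}, {"hidden": "secretB"}, {"meta": "secretC"}]
header = ["id", "data", "label"]
# sorted(set(...)) collects each metadata key once across all records,
# so each metadata column appears exactly once in the exported CSV.
header += sorted(set(itertools.chain(*[m.keys() for m in metadata_per_record])))
print(header)  # ['id', 'data', 'label', 'hidden', 'meta']
```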
| 2021-11-11T01:37:39 |
|
doccano/doccano | 1,558 | doccano__doccano-1558 | [
"1466"
] | 0d7bf054e619c144ec84fcf18f9457af8822a204 | diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -84,7 +84,7 @@ def create_line(self, record) -> Dict:
return {
'id': record.id,
'data': record.data,
- 'label': '#'.join(record.label),
+ 'label': '#'.join(sorted(record.label)),
**record.metadata
}
@@ -144,6 +144,7 @@ class FastTextWriter(LineWriter):
def create_line(self, record):
line = [f'__label__{label}' for label in record.label]
+ line.sort()
line.append(record.data)
line = ' '.join(line)
return line
| diff --git a/backend/api/tests/download/test_writer.py b/backend/api/tests/download/test_writer.py
--- a/backend/api/tests/download/test_writer.py
+++ b/backend/api/tests/download/test_writer.py
@@ -32,6 +32,16 @@ def test_create_line(self):
}
self.assertEqual(line, expected)
+ def test_label_order(self):
+ writer = CsvWriter('.')
+ record1 = Record(id=0, data='', label=['labelA', 'labelB'], user='', metadata={})
+ record2 = Record(id=0, data='', label=['labelB', 'labelA'], user='', metadata={})
+ line1 = writer.create_line(record1)
+ line2 = writer.create_line(record2)
+ expected = 'labelA#labelB'
+ self.assertEqual(line1['label'], expected)
+ self.assertEqual(line2['label'], expected)
+
@patch('os.remove')
@patch('zipfile.ZipFile')
@patch('csv.DictWriter.writerow')
 | Multi-label text classification export issues: same classes but in different orders
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
We are two annotators on a multi-label classification project. When I export the annotations, for some examples my co-annotator and I have applied the same labels, but in the exported CSV they do not appear in the same order:
Annotator 1:

| text | labels |
| --- | --- |
| example 1 | label1#label2#label3 |

Annotator 2:

| text | labels |
| --- | --- |
| example 1 | label2#label3#label1 |
Since I use these CSVs to compare our annotations, this makes the comparison harder.
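The patch above resolves this by sorting labels before joining them; a minimal illustration:
```python
labels_a = ["label1", "label2", "label3"]
labels_b = ["label2", "label3", "label1"]
# Sorting makes the serialized label string deterministic, so two
# annotators with the same label set export identically.
assert "#".join(sorted(labels_a)) == "#".join(sorted(labels_b)) == "label1#label2#label3"
```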
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Debian
* Python Version Used: Don't know, I pulled the latest version from Docker Hub
* When you install doccano: 3 days ago
* How did you install doccano (Heroku button etc): Docker
| 2021-11-12T01:01:49 |
|
doccano/doccano | 1,583 | doccano__doccano-1583 | [
"1513"
] | 2a37d5d8409c9ff11c4383b53de91414ba2925ac | diff --git a/backend/api/models.py b/backend/api/models.py
--- a/backend/api/models.py
+++ b/backend/api/models.py
@@ -284,14 +284,21 @@ class Span(Annotation):
def validate_unique(self, exclude=None):
allow_overlapping = getattr(self.example.project, 'allow_overlapping', False)
+ is_collaborative = self.example.project.collaborative_annotation
if allow_overlapping:
super().validate_unique(exclude=exclude)
+ return
+
+ overlapping_span = Span.objects.exclude(id=self.id).filter(example=self.example).filter(
+ models.Q(start_offset__gte=self.start_offset, start_offset__lt=self.end_offset) |
+ models.Q(end_offset__gt=self.start_offset, end_offset__lte=self.end_offset) |
+ models.Q(start_offset__lte=self.start_offset, end_offset__gte=self.end_offset)
+ )
+ if is_collaborative:
+ if overlapping_span.exists():
+ raise ValidationError('This overlapping is not allowed in this project.')
else:
- if Span.objects.exclude(id=self.id).filter(example=self.example).filter(
- models.Q(start_offset__gte=self.start_offset, start_offset__lt=self.end_offset) |
- models.Q(end_offset__gt=self.start_offset, end_offset__lte=self.end_offset) |
- models.Q(start_offset__lte=self.start_offset, end_offset__gte=self.end_offset)
- ).exists():
+ if overlapping_span.filter(user=self.user).exists():
raise ValidationError('This overlapping is not allowed in this project.')
def save(self, force_insert=False, force_update=False, using=None,
diff --git a/backend/api/views/annotation.py b/backend/api/views/annotation.py
--- a/backend/api/views/annotation.py
+++ b/backend/api/views/annotation.py
@@ -1,3 +1,4 @@
+from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
@@ -34,7 +35,11 @@ def create(self, request, *args, **kwargs):
if self.project.single_class_classification:
self.get_queryset().delete()
request.data['example'] = self.kwargs['doc_id']
- return super().create(request, args, kwargs)
+ try:
+ response = super().create(request, args, kwargs)
+ except ValidationError as err:
+ response = Response({'detail': err.messages}, status=status.HTTP_400_BAD_REQUEST)
+ return response
def perform_create(self, serializer):
serializer.save(example_id=self.kwargs['doc_id'], user=self.request.user)
| diff --git a/backend/api/tests/api/utils.py b/backend/api/tests/api/utils.py
--- a/backend/api/tests/api/utils.py
+++ b/backend/api/tests/api/utils.py
@@ -48,7 +48,8 @@ def make_project(
task: str,
users: List[str],
roles: List[str] = None,
- collaborative_annotation=False):
+ collaborative_annotation=False,
+ **kwargs):
create_default_roles()
# create users.
@@ -68,7 +69,8 @@ def make_project(
_model=project_model,
project_type=task,
users=users,
- collaborative_annotation=collaborative_annotation
+ collaborative_annotation=collaborative_annotation,
+ **kwargs
)
# assign roles to the users.
@@ -119,7 +121,7 @@ def make_annotation(task, doc, user, **kwargs):
return mommy.make(annotation_model, example=doc, user=user, **kwargs)
-def prepare_project(task: str = 'Any', collaborative_annotation=False):
+def prepare_project(task: str = 'Any', collaborative_annotation=False, **kwargs):
return make_project(
task=task,
users=['admin', 'approver', 'annotator'],
@@ -128,7 +130,8 @@ def prepare_project(task: str = 'Any', collaborative_annotation=False):
settings.ROLE_ANNOTATION_APPROVER,
settings.ROLE_ANNOTATOR,
],
- collaborative_annotation=collaborative_annotation
+ collaborative_annotation=collaborative_annotation,
+ **kwargs
)
diff --git a/backend/api/tests/test_models.py b/backend/api/tests/test_models.py
--- a/backend/api/tests/test_models.py
+++ b/backend/api/tests/test_models.py
@@ -3,8 +3,9 @@
from django.test import TestCase, override_settings
from model_mommy import mommy
-from ..models import (Category, Label, Span, TextLabel,
+from ..models import (SEQUENCE_LABELING, Category, Label, Span, TextLabel,
generate_random_hex_color)
+from .api.utils import prepare_project
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
@@ -117,7 +118,12 @@ def test_uniqueness(self):
Category(example=a.example, user=a.user, label=a.label).save()
-class TestSequenceAnnotation(TestCase):
+class TestSpan(TestCase):
+
+ def setUp(self):
+ self.project = prepare_project(SEQUENCE_LABELING, allow_overlapping=False)
+ self.example = mommy.make('Example', project=self.project.item)
+ self.user = self.project.users[0]
def test_start_offset_is_not_negative(self):
with self.assertRaises(IntegrityError):
@@ -131,33 +137,66 @@ def test_start_offset_is_less_than_end_offset(self):
with self.assertRaises(IntegrityError):
mommy.make('Span', start_offset=0, end_offset=0)
- def test_overlapping(self):
- project = mommy.make('SequenceLabelingProject', allow_overlapping=False)
- example = mommy.make('Example', project=project)
- mommy.make('Span', example=example, start_offset=5, end_offset=10)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=5, end_offset=10)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=5, end_offset=11)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=4, end_offset=10)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=6, end_offset=9)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=9, end_offset=15)
- with self.assertRaises(ValidationError):
- mommy.make('Span', example=example, start_offset=0, end_offset=6)
- mommy.make('Span', example=example, start_offset=0, end_offset=5)
- mommy.make('Span', example=example, start_offset=10, end_offset=15)
+ def test_unique_constraint(self):
+ mommy.make('Span', example=self.example, start_offset=5, end_offset=10, user=self.user)
+ mommy.make('Span', example=self.example, start_offset=0, end_offset=5, user=self.user)
+ mommy.make('Span', example=self.example, start_offset=10, end_offset=15, user=self.user)
+
+ def test_unique_constraint_violated(self):
+ mommy.make('Span', example=self.example, start_offset=5, end_offset=10, user=self.user)
+ spans = [(5, 10), (5, 11), (4, 10), (6, 9), (9, 15), (0, 6)]
+ for start_offset, end_offset in spans:
+ with self.assertRaises(ValidationError):
+ mommy.make(
+ 'Span',
+ example=self.example,
+ start_offset=start_offset,
+ end_offset=end_offset,
+ user=self.user
+ )
+
+ def test_unique_constraint_if_overlapping_is_allowed(self):
+ project = prepare_project(SEQUENCE_LABELING, allow_overlapping=True)
+ example = mommy.make('Example', project=project.item)
+ user = project.users[0]
+ mommy.make('Span', example=example, start_offset=5, end_offset=10, user=user)
+ spans = [(5, 10), (5, 11), (4, 10), (6, 9), (9, 15), (0, 6)]
+ for start_offset, end_offset in spans:
+ mommy.make('Span', example=example, start_offset=start_offset, end_offset=end_offset, user=user)
def test_update(self):
- project = mommy.make('SequenceLabelingProject', allow_overlapping=False)
- example = mommy.make('Example', project=project)
- span = mommy.make('Span', example=example, start_offset=0, end_offset=5)
+ span = mommy.make('Span', example=self.example, start_offset=0, end_offset=5)
span.end_offset = 6
span.save()
+class TestSpanWithoutCollaborativeMode(TestCase):
+
+ def setUp(self):
+ self.project = prepare_project(SEQUENCE_LABELING, False, allow_overlapping=False)
+ self.example = mommy.make('Example', project=self.project.item)
+
+ def test_allow_users_to_create_same_spans(self):
+ mommy.make('Span', example=self.example, start_offset=5, end_offset=10, user=self.project.users[0])
+ mommy.make('Span', example=self.example, start_offset=5, end_offset=10, user=self.project.users[1])
+
+
+class TestSpanWithCollaborativeMode(TestCase):
+
+ def test_deny_users_to_create_same_spans(self):
+ project = prepare_project(SEQUENCE_LABELING, True, allow_overlapping=False)
+ example = mommy.make('Example', project=project.item)
+ mommy.make('Span', example=example, start_offset=5, end_offset=10, user=project.users[0])
+ with self.assertRaises(ValidationError):
+ mommy.make('Span', example=example, start_offset=5, end_offset=10, user=project.users[1])
+
+ def test_allow_users_to_create_same_spans_if_overlapping_is_allowed(self):
+ project = prepare_project(SEQUENCE_LABELING, True, allow_overlapping=True)
+ example = mommy.make('Example', project=project.item)
+ mommy.make('Span', example=example, start_offset=5, end_offset=10, user=project.users[0])
+ mommy.make('Span', example=example, start_offset=5, end_offset=10, user=project.users[1])
+
+
class TestSeq2seqAnnotation(TestCase):
def test_uniqueness(self):
| Unable to tag selected text in collab mode
### Environment
* Operating System: Linux - Alpine 3.11
* Python Version Used: 3.8.6
* When you install doccano: July 14, 2021
* How did you install doccano (Heroku button etc): one-click setup script provided on GitHub readme
### The problem
Bug: when attempting to tag text, words/characters about 10-13 characters over end up being tagged, as opposed to the selected text. This issue persists across browsers (Chrome and Safari) and across all of our uploaded documents. When `collaboration` mode is turned on, all team members experience the issue. However, when we turn it off, it begins working perfectly again. Please see video: https://user-images.githubusercontent.com/21316094/136839200-8384dbcb-8b8f-49a5-9ae9-3235be6e6807.mov
### To reproduce
Reproducible test case: multiple users should tag a particular document, turn collaborative mode on then try tagging that document and see if the text selected is the text that gets tagged.
| Would you write your environment? Thank you! | 2021-12-02T06:49:45 |
doccano/doccano | 1,632 | doccano__doccano-1632 | [
"1603"
] | fb459c2b2b97667f96704f052f69c31c4f541228 | diff --git a/backend/api/models.py b/backend/api/models.py
--- a/backend/api/models.py
+++ b/backend/api/models.py
@@ -1,7 +1,7 @@
+import abc
import random
import string
import uuid
-from typing import Literal
from auto_labeling_pipeline.models import RequestModelFactory
from django.contrib.auth.models import User
@@ -39,8 +39,30 @@ class Project(PolymorphicModel):
collaborative_annotation = models.BooleanField(default=False)
single_class_classification = models.BooleanField(default=False)
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- raise NotImplementedError()
+ @property
+ @abc.abstractmethod
+ def is_text_project(self) -> bool:
+ return False
+
+ @property
+ def can_define_label(self) -> bool:
+ """Whether or not the project can define label(ignoring the type of label)"""
+ return False
+
+ @property
+ def can_define_relation(self) -> bool:
+ """Whether or not the project can define relation."""
+ return False
+
+ @property
+ def can_define_category(self) -> bool:
+ """Whether or not the project can define category."""
+ return False
+
+ @property
+ def can_define_span(self) -> bool:
+ """Whether or not the project can define span."""
+ return False
def __str__(self):
return self.name
@@ -48,40 +70,82 @@ def __str__(self):
class TextClassificationProject(Project):
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'text'
+ @property
+ def is_text_project(self) -> bool:
+ return True
+
+ @property
+ def can_define_label(self) -> bool:
+ return True
+
+ @property
+ def can_define_category(self) -> bool:
+ return True
class SequenceLabelingProject(Project):
allow_overlapping = models.BooleanField(default=False)
grapheme_mode = models.BooleanField(default=False)
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'text'
+ @property
+ def is_text_project(self) -> bool:
+ return True
+
+ @property
+ def can_define_label(self) -> bool:
+ return True
+
+ @property
+ def can_define_span(self) -> bool:
+ return True
class Seq2seqProject(Project):
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'text'
+ @property
+ def is_text_project(self) -> bool:
+ return True
class IntentDetectionAndSlotFillingProject(Project):
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'text'
+ @property
+ def is_text_project(self) -> bool:
+ return True
+
+ @property
+ def can_define_label(self) -> bool:
+ return True
+
+ @property
+ def can_define_category(self) -> bool:
+ return True
+
+ @property
+ def can_define_span(self) -> bool:
+ return True
class Speech2textProject(Project):
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'speech'
+ @property
+ def is_text_project(self) -> bool:
+ return False
class ImageClassificationProject(Project):
- def is_task_of(self, task: Literal['text', 'image', 'speech']):
- return task == 'image'
+ @property
+ def is_text_project(self) -> bool:
+ return False
+
+ @property
+ def can_define_label(self) -> bool:
+ return True
+
+ @property
+ def can_define_category(self) -> bool:
+ return True
def generate_random_hex_color():
diff --git a/backend/api/serializers.py b/backend/api/serializers.py
--- a/backend/api/serializers.py
+++ b/backend/api/serializers.py
@@ -173,11 +173,21 @@ class Meta:
'random_order',
'collaborative_annotation',
'single_class_classification',
+ 'is_text_project',
+ 'can_define_label',
+ 'can_define_relation',
+ 'can_define_category',
+ 'can_define_span',
'tags'
)
read_only_fields = (
'updated_at',
'users',
+ 'is_text_project',
+ 'can_define_label',
+ 'can_define_relation',
+ 'can_define_category',
+ 'can_define_span',
'tags'
)
diff --git a/backend/api/views/auto_labeling.py b/backend/api/views/auto_labeling.py
--- a/backend/api/views/auto_labeling.py
+++ b/backend/api/views/auto_labeling.py
@@ -142,7 +142,7 @@ def send_request(self, model, example):
def prepare_example(self):
text = self.request.data['text']
- if self.project.is_task_of('text'):
+ if self.project.is_text_project:
return text
else:
tu = TemporaryUpload.objects.get(upload_id=text)
@@ -221,7 +221,7 @@ def perform_create(self, serializer):
def get_example(self, project):
example = get_object_or_404(Example, pk=self.kwargs['example_id'])
- if project.is_task_of('text'):
+ if project.is_text_project:
return example.text
else:
return str(example.filename)
 | Python 3.7 compatibility
This PR fixes `doccano init` failing on Python 3.7 with
```
Traceback (most recent call last):
File "/usr/lib/python3.7/code.py", line 90, in runcode
exec(code, self.locals)
File "<input>", line 1, in <module>
ImportError: cannot import name 'Literal' from 'typing' (/usr/lib/python3.7/typing.py)
```
see https://stackoverflow.com/questions/61206437/importerror-cannot-import-name-literal-from-typing?noredirect=1&lq=1
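`typing.Literal` only exists from Python 3.8 onward, so the patch drops the `Literal`-typed `is_task_of` method in favor of plain boolean properties. A minimal sketch of the pattern (the real `Project` inherits `PolymorphicModel`; it is simplified here):
```python
import abc

class Project:
    @property
    @abc.abstractmethod
    def is_text_project(self) -> bool:
        return False

class TextClassificationProject(Project):
    @property
    def is_text_project(self) -> bool:
        return True

assert TextClassificationProject().is_text_project
```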
| 2022-01-14T01:39:26 |
||
doccano/doccano | 1,654 | doccano__doccano-1654 | [
"1651"
] | 1bc9d73523dbfd2f98712680024009f23d422e40 | diff --git a/backend/members/models.py b/backend/members/models.py
--- a/backend/members/models.py
+++ b/backend/members/models.py
@@ -56,7 +56,7 @@ class Member(models.Model):
objects = MemberManager()
def clean(self):
- members = self.objects.exclude(id=self.id)
+ members = self.__class__.objects.exclude(id=self.id)
if members.filter(user=self.user, project=self.project).exists():
message = 'This user is already assigned to a role in this project.'
raise ValidationError(message)
| diff --git a/backend/members/tests.py b/backend/members/tests.py
--- a/backend/members/tests.py
+++ b/backend/members/tests.py
@@ -1,6 +1,9 @@
from django.conf import settings
+from django.test import TestCase
+from django.core.exceptions import ValidationError
from rest_framework import status
from rest_framework.reverse import reverse
+from model_mommy import mommy
from roles.models import Role
from members.models import Member
@@ -16,30 +19,30 @@ def setUp(self):
self.data = {'user': self.non_member.id, 'role': admin_role.id, 'project': self.project.item.id}
self.url = reverse(viewname='member_list', args=[self.project.item.id])
- def test_allows_project_admin_to_get_mappings(self):
+ def test_allows_project_admin_to_know_members(self):
self.assert_fetch(self.project.users[0], status.HTTP_200_OK)
- def test_denies_non_project_admin_to_get_mappings(self):
+ def test_denies_non_project_admin_to_know_members(self):
for member in self.project.users[1:]:
self.assert_fetch(member, status.HTTP_403_FORBIDDEN)
- def test_denies_non_project_member_to_get_mappings(self):
+ def test_denies_non_project_member_to_know_members(self):
self.assert_fetch(self.non_member, status.HTTP_403_FORBIDDEN)
- def test_denies_unauthenticated_user_to_get_mappings(self):
+ def test_denies_unauthenticated_user_to_known_members(self):
self.assert_fetch(expected=status.HTTP_403_FORBIDDEN)
- def test_allows_project_admin_to_create_mapping(self):
+ def test_allows_project_admin_to_add_member(self):
self.assert_create(self.project.users[0], status.HTTP_201_CREATED)
- def test_denies_non_project_admin_to_create_mapping(self):
+ def test_denies_non_project_admin_to_add_member(self):
for member in self.project.users[1:]:
self.assert_create(member, status.HTTP_403_FORBIDDEN)
- def test_denies_non_project_member_to_create_mapping(self):
+ def test_denies_non_project_member_to_add_member(self):
self.assert_create(self.non_member, status.HTTP_403_FORBIDDEN)
- def test_denies_unauthenticated_user_to_create_mapping(self):
+ def test_denies_unauthenticated_user_to_add_member(self):
self.assert_create(expected=status.HTTP_403_FORBIDDEN)
def assert_bulk_delete(self, user=None, expected=status.HTTP_403_FORBIDDEN):
@@ -49,19 +52,19 @@ def assert_bulk_delete(self, user=None, expected=status.HTTP_403_FORBIDDEN):
response = self.client.delete(self.url, data={'ids': ids}, format='json')
self.assertEqual(response.status_code, expected)
- def test_allows_project_admin_to_bulk_delete(self):
+ def test_allows_project_admin_to_remove_members(self):
self.assert_bulk_delete(self.project.users[0], status.HTTP_204_NO_CONTENT)
response = self.client.get(self.url)
self.assertEqual(len(response.data), 1)
- def test_denies_non_project_admin_to_bulk_delete(self):
+ def test_denies_non_project_admin_to_remove_members(self):
for member in self.project.users[1:]:
self.assert_bulk_delete(member, status.HTTP_403_FORBIDDEN)
- def test_denies_non_project_member_to_bulk_delete(self):
+ def test_denies_non_project_member_to_remove_members(self):
self.assert_bulk_delete(self.non_member, status.HTTP_403_FORBIDDEN)
- def test_denies_unauthenticated_user_to_bulk_delete(self):
+ def test_denies_unauthenticated_user_to_remove_members(self):
self.assert_bulk_delete(expected=status.HTTP_403_FORBIDDEN)
@@ -71,34 +74,34 @@ def setUp(self):
self.project = prepare_project()
self.non_member = make_user()
admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
- mapping = Member.objects.get(user=self.project.users[1])
- self.url = reverse(viewname='member_detail', args=[self.project.item.id, mapping.id])
+ member = Member.objects.get(user=self.project.users[1])
+ self.url = reverse(viewname='member_detail', args=[self.project.item.id, member.id])
self.data = {'role': admin_role.id}
- def test_allows_project_admin_to_get_mapping(self):
+ def test_allows_project_admin_to_known_member(self):
self.assert_fetch(self.project.users[0], status.HTTP_200_OK)
- def test_denies_non_project_admin_to_get_mapping(self):
+ def test_denies_non_project_admin_to_know_member(self):
for member in self.project.users[1:]:
self.assert_fetch(member, status.HTTP_403_FORBIDDEN)
- def test_denies_non_project_member_to_get_mapping(self):
+ def test_denies_non_project_member_to_know_member(self):
self.assert_fetch(self.non_member, status.HTTP_403_FORBIDDEN)
- def test_denies_unauthenticated_user_to_get_mapping(self):
+ def test_denies_unauthenticated_user_to_know_member(self):
self.assert_fetch(expected=status.HTTP_403_FORBIDDEN)
- def test_allows_project_admin_to_update_mapping(self):
+ def test_allows_project_admin_to_change_member_role(self):
self.assert_update(self.project.users[0], status.HTTP_200_OK)
- def test_denies_non_project_admin_to_update_mapping(self):
+ def test_denies_non_project_admin_to_change_member_role(self):
for member in self.project.users[1:]:
self.assert_update(member, status.HTTP_403_FORBIDDEN)
- def test_denies_non_project_member_to_update_mapping(self):
+ def test_denies_non_project_member_to_change_member_role(self):
self.assert_update(self.non_member, status.HTTP_403_FORBIDDEN)
- def test_denies_unauthenticated_user_to_update_mapping(self):
+ def test_denies_unauthenticated_user_to_change_member_role(self):
self.assert_update(expected=status.HTTP_403_FORBIDDEN)
@@ -116,9 +119,6 @@ def test_filter_role_by_user_id(self):
class TestMemberManager(CRUDMixin):
- def setUp(self):
- pass
-
def test_has_role(self):
project = prepare_project()
admin = project.users[0]
@@ -129,3 +129,12 @@ def test_has_role(self):
]
for role, expect in expected:
self.assertEqual(Member.objects.has_role(project.item, admin, role), expect)
+
+
+class TestMember(TestCase):
+
+ def test_clean(self):
+ member = mommy.make('Member')
+ same_user = Member(project=member.project, user=member.user, role=member.role)
+ with self.assertRaises(ValidationError):
+ same_user.clean()
| I can't add members in the Django admin page.
Steps
- Add a member in the admin page (click the SAVE button).
  - <img width="1273" alt="Screenshot 2022-01-27 9 52 17" src="https://user-images.githubusercontent.com/20487308/151271702-bf60ae7e-f131-45fe-8314-e7726e90f90c.png">
  - However, I get a 500 error.
  - <img width="1085" alt="Screenshot 2022-01-27 9 53 08" src="https://user-images.githubusercontent.com/20487308/151271872-c3fa75e8-c491-4aff-b88e-c9d970406ede.png">
- The endpoints of the POST request are different between admin page and member page.
- `POST /admin/members/member/add/`
- `POST /v1/projects/1/members`
Environment
---------
doccano v1.5.5
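The root cause (fixed in the patch above) is that `clean()` accessed the model manager through the instance; Django managers are only reachable through the class, so `self.objects` raises an `AttributeError` when the admin form calls `full_clean()` — which is likely why only the admin endpoint failed. A minimal sketch (fields elided; requires a Django app context to run):
```python
from django.core.exceptions import ValidationError
from django.db import models

class Member(models.Model):
    # user/project/role fields elided; see the actual model
    def clean(self):
        # self.objects would raise AttributeError on an instance;
        # reach the manager through the class instead.
        members = self.__class__.objects.exclude(id=self.id)
        if members.filter(user=self.user, project=self.project).exists():
            raise ValidationError('This user is already assigned to a role in this project.')
```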
| 2022-01-27T04:50:49 |
|
doccano/doccano | 1,668 | doccano__doccano-1668 | [
"1666"
] | 61a8569ce15e06af8592e0b6d151cab7c837208c | diff --git a/backend/api/views/project.py b/backend/api/views/project.py
--- a/backend/api/views/project.py
+++ b/backend/api/views/project.py
@@ -1,5 +1,6 @@
from django.conf import settings
-from rest_framework import generics, status
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework import filters, generics, status
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
@@ -11,7 +12,8 @@
class ProjectList(generics.ListCreateAPIView):
serializer_class = ProjectPolymorphicSerializer
- pagination_class = None
+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
+ search_fields = ('name', 'description')
def get_permissions(self):
if self.request.method == 'GET':
| diff --git a/backend/api/tests/api/test_project.py b/backend/api/tests/api/test_project.py
--- a/backend/api/tests/api/test_project.py
+++ b/backend/api/tests/api/test_project.py
@@ -15,13 +15,13 @@ def setUpTestData(cls):
def test_return_projects_to_member(self):
for member in self.project.members:
response = self.assert_fetch(member, status.HTTP_200_OK)
- project = response.data[0]
- self.assertEqual(len(response.data), 1)
+ project = response.data['results'][0]
+ self.assertEqual(response.data['count'], 1)
self.assertEqual(project['id'], self.project.item.id)
def test_does_not_return_project_to_non_member(self):
response = self.assert_fetch(self.non_member, status.HTTP_200_OK)
- self.assertEqual(len(response.data), 0)
+ self.assertEqual(response.data['count'], 0)
class TestProjectCreate(CRUDMixin):
| Pagination of the project list
When fetching projects in the project list page, is it intentional that all projects are fetched at once even though there is pagination?
Endpoint of project list fetching: `/v1/projects`
When there are a lot of projects, it takes a long time to display them.
Your Environment
---------
doccano v1.5.5
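With the change above, `/v1/projects` returns DRF's paginated envelope (`count`/`results`, as the updated tests show) instead of a bare list. A minimal client sketch — the query parameter names depend on the configured pagination class and are assumptions here:
```python
import requests

resp = requests.get(
    "http://localhost:8000/v1/projects",
    params={"limit": 10, "offset": 0},  # assumed pagination params
    headers={"Authorization": "Token <token>"},  # placeholder token
)
page = resp.json()
print(page["count"], len(page["results"]))  # total vs. items in this page
```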
| Yes. It's intentional.
I assumed users wouldn't join too many projects.
Do you think it should be paginated? It's relatively easy.
I think fetching should be paginated.
Even when there are only a few projects, pagination does not hurt performance. | 2022-02-02T02:16:23 |
doccano/doccano | 1,670 | doccano__doccano-1670 | [
"1669"
] | 819d25f4cea65846b9b61f1bf0dcffb16b5d81fe | diff --git a/backend/api/migrations/0033_auto_20220127_0654.py b/backend/api/migrations/0033_auto_20220127_0654.py
--- a/backend/api/migrations/0033_auto_20220127_0654.py
+++ b/backend/api/migrations/0033_auto_20220127_0654.py
@@ -34,7 +34,7 @@ class Migration(migrations.Migration):
),
migrations.AlterModelTable(
name='SpanType',
- table='label_types_spanType'
+ table='label_types_spantype'
)
]
)
| Database table for SpanType has invalid name
How to reproduce the behaviour
---------
- Pull latest changes from master
- ./manage.py migrate
- ./api/migrations/0033_auto_20220127_0654.py will migrate the database table for `SpanType` to `label_types_spanType`
- Delete a project: `Project.objects.first().delete()`
Exception:
<img width="511" alt="image" src="https://user-images.githubusercontent.com/6747788/152384221-a6a549b8-1cca-49c0-86e4-6a20f7d0a266.png">
The issue can be resolved either by renaming the db table `label_types_spanType` to `label_types_spantype` or by explicitly setting `db_table` for the SpanType model like this: `db_table = "label_types_spanType"`
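A minimal sketch of the second workaround (the parent class is simplified here; note that the merged fix above instead lowercases the table name in the migration itself):
```python
from django.db import models

class SpanType(models.Model):  # real model inherits doccano's label base class
    class Meta:
        # Pin the table name so Django's default (lowercased) name
        # cannot diverge from the migrated mixed-case table.
        db_table = "label_types_spanType"
```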
Your Environment
---------
* Operating System: macOS Monterey, doccano is locally executed
* Python Version Used: 3.9
| 2022-02-03T22:25:16 |
||
doccano/doccano | 1,744 | doccano__doccano-1744 | [
"1598"
] | 9f0f89fcf90f797cfa2367761f93a5f4119cc1d0 | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -51,7 +51,7 @@ def load(self):
options = {
"bind": "%s:%s" % ("0.0.0.0", args.port),
- "workers": number_of_workers(),
+ "workers": args.workers,
"chdir": base,
"capture_output": True,
"loglevel": "debug",
@@ -134,6 +134,7 @@ def main():
# Create a parser for web server.
parser_server = subparsers.add_parser("webserver", help="see `webserver -h`")
parser_server.add_argument("--port", type=int, default=8000, help="port number")
+ parser_server.add_argument("--workers", type=int, default=number_of_workers(), help="the number of workers")
parser_server.set_defaults(handler=command_run_webserver)
# Create a parser for task queue.
 | How could I reduce the number of workers?
Could I reduce the number_of_workers?
---------
I run doccano on my machine using these commands.
```
doccano init
doccano createuser ***
doccano webserver --port ***
```
And then I got this log:
```
Booting worker with pid: 19
Booting worker with pid: 20
...
Booting worker with pid: 157
```
It runs lots of workers and takes up a lot of memory. So, can I change the `number_of_workers` variable? I saw the default is `number_of_workers = multiprocessing.cpu_count() * 2 + 1`. How could I change it?
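The patch above adds a `--workers` option to the `webserver` subcommand, so the count can now be set at launch (e.g. `doccano webserver --port 8000 --workers 2`). For reference, the default it overrides is:
```python
import multiprocessing

def number_of_workers():
    # doccano's default: 2 * CPU cores + 1, which grows quickly on
    # many-core machines and explains the long list of booted workers.
    return (multiprocessing.cpu_count() * 2) + 1
```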
Your Environment
---------
* Operating System: Linux
* Python Version Used: Python38
* When you install doccano: 2021-11-30
* How did you install doccano (Heroku button etc): pip install doccano
 | While preparing a PR to solve this problem, I got this error:
```shell
fatal: 'origin' does not appear to be a git repository
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists.
```
😅 | 2022-03-17T05:51:02 |
|
doccano/doccano | 1,764 | doccano__doccano-1764 | [
"1763"
] | 350bc5e95fa459d1dc124a320cb8e34385b73021 | diff --git a/backend/data_export/celery_tasks.py b/backend/data_export/celery_tasks.py
--- a/backend/data_export/celery_tasks.py
+++ b/backend/data_export/celery_tasks.py
@@ -13,7 +13,7 @@
@shared_task
def export_dataset(project_id, file_format: str, export_approved=False):
project = get_object_or_404(Project, pk=project_id)
- repository = create_repository(project)
+ repository = create_repository(project, file_format)
writer = create_writer(file_format)(settings.MEDIA_ROOT)
service = ExportApplicationService(repository, writer)
filepath = service.export(export_approved)
diff --git a/backend/data_export/pipeline/factories.py b/backend/data_export/pipeline/factories.py
--- a/backend/data_export/pipeline/factories.py
+++ b/backend/data_export/pipeline/factories.py
@@ -11,8 +11,8 @@
)
-def create_repository(project):
- if getattr(project, "use_relation", False):
+def create_repository(project, file_format: str):
+ if getattr(project, "use_relation", False) and file_format == catalog.JSONLRelation.name:
return repositories.RelationExtractionRepository(project)
mapping = {
DOCUMENT_CLASSIFICATION: repositories.TextClassificationRepository,
diff --git a/backend/data_export/pipeline/repositories.py b/backend/data_export/pipeline/repositories.py
--- a/backend/data_export/pipeline/repositories.py
+++ b/backend/data_export/pipeline/repositories.py
@@ -1,7 +1,7 @@
import abc
import itertools
from collections import defaultdict
-from typing import Dict, Iterator, List, Tuple, Union
+from typing import Any, Dict, Iterator, List, Tuple
from .data import Record
from examples.models import Example
@@ -10,13 +10,12 @@
SpanType = Tuple[int, int, str]
-class BaseRepository(abc.ABC):
+class BaseRepository:
def __init__(self, project: Project):
self.project = project
- @abc.abstractmethod
def list(self, export_approved=False) -> Iterator[Record]:
- pass
+ raise NotImplementedError()
class FileRepository(BaseRepository):
@@ -54,7 +53,7 @@ def label_per_user(self, example) -> Dict:
label_per_user[a.user.username].append(a.label.text)
return label_per_user
- def reduce_user(self, label_per_user: Dict[str, List]):
+ def reduce_user(self, label_per_user: Dict[str, Any]):
value = list(itertools.chain(*label_per_user.values()))
return {"all": value}
@@ -96,7 +95,7 @@ def list(self, export_approved=False):
def label_per_user(self, doc) -> Dict:
raise NotImplementedError()
- def reduce_user(self, label_per_user: Dict[str, List]):
+ def reduce_user(self, label_per_user: Dict[str, Any]):
value = list(itertools.chain(*label_per_user.values()))
return {"all": value}
@@ -161,6 +160,14 @@ def label_per_user(self, doc) -> Dict:
label_per_user[user]["entities"] = span
return label_per_user
+ def reduce_user(self, label_per_user: Dict[str, Any]):
+ entities = []
+ relations = []
+ for user, label in label_per_user.items():
+ entities.extend(label.get("entities", []))
+ relations.extend(label.get("relations", []))
+ return {"all": {"entities": entities, "relations": relations}}
+
class Seq2seqRepository(TextRepository):
@property
@@ -184,7 +191,7 @@ def docs(self):
def label_per_user(self, doc) -> Dict:
category_per_user: Dict[str, List[str]] = defaultdict(list)
span_per_user: Dict[str, List[SpanType]] = defaultdict(list)
- label_per_user: Dict[str, Dict[str, Union[List[str], List[SpanType]]]] = defaultdict(dict)
+ label_per_user: Dict[str, Dict[str, List]] = defaultdict(dict)
for a in doc.categories.all():
category_per_user[a.user.username].append(a.label.text)
for a in doc.spans.all():
@@ -193,4 +200,15 @@ def label_per_user(self, doc) -> Dict:
label_per_user[user]["cats"] = cats
for user, span in span_per_user.items():
label_per_user[user]["entities"] = span
+ for label in label_per_user.values():
+ label.setdefault("cats", [])
+ label.setdefault("entities", [])
return label_per_user
+
+ def reduce_user(self, label_per_user: Dict[str, Any]):
+ cats = []
+ entities = []
+ for user, label in label_per_user.items():
+ cats.extend(label.get("cats", []))
+ entities.extend(label.get("entities", []))
+ return {"all": {"entities": entities, "cats": cats}}
| diff --git a/backend/data_export/tests/test_repositories.py b/backend/data_export/tests/test_repositories.py
--- a/backend/data_export/tests/test_repositories.py
+++ b/backend/data_export/tests/test_repositories.py
@@ -3,72 +3,341 @@
from model_mommy import mommy
from ..pipeline.repositories import (
+ FileRepository,
IntentDetectionSlotFillingRepository,
RelationExtractionRepository,
+ Seq2seqRepository,
+ SequenceLabelingRepository,
+ Speech2TextRepository,
+ TextClassificationRepository,
+)
+from projects.models import (
+ DOCUMENT_CLASSIFICATION,
+ IMAGE_CLASSIFICATION,
+ INTENT_DETECTION_AND_SLOT_FILLING,
+ SEQ2SEQ,
+ SEQUENCE_LABELING,
+ SPEECH2TEXT,
)
-from projects.models import INTENT_DETECTION_AND_SLOT_FILLING, SEQUENCE_LABELING
from projects.tests.utils import prepare_project
-class TestCSVWriter(unittest.TestCase):
- def setUp(self):
- self.project = prepare_project(INTENT_DETECTION_AND_SLOT_FILLING)
+class TestRepository(unittest.TestCase):
+ def assert_records(self, repository, expected):
+ records = list(repository.list())
+ self.assertEqual(len(records), len(expected))
+ for record, expect in zip(records, expected):
+ self.assertEqual(record.data, expect["data"])
+ self.assertEqual(record.label, expect["label"])
+ self.assertEqual(record.user, expect["user"])
+
+
+class TestTextClassificationRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.category1 = mommy.make("Category", example=self.example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
def test_list(self):
- example = mommy.make("Example", project=self.project.item, text="example")
- category = mommy.make("Category", example=example, user=self.project.admin)
- span = mommy.make("Span", example=example, user=self.project.admin, start_offset=0, end_offset=1)
- repository = IntentDetectionSlotFillingRepository(self.project.item)
+ project = prepare_project(DOCUMENT_CLASSIFICATION)
+ repository = TextClassificationRepository(project.item)
+ self.prepare_data(project)
expected = [
{
- "data": example.text,
+ "data": self.example.text,
+ "label": [self.category1.label.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.text,
+ "label": [self.category2.label.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=True)
+ repository = TextClassificationRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": [self.category1.label.text, self.category2.label.text],
+ "user": "all",
+ }
+ ]
+ self.assert_records(repository, expected)
+
+
+class TestSeq2seqRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.text1 = mommy.make("TextLabel", example=self.example, user=project.admin)
+ self.text2 = mommy.make("TextLabel", example=self.example, user=project.annotator)
+
+ def test_list(self):
+ project = prepare_project(SEQ2SEQ)
+ repository = Seq2seqRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": [self.text1.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.text,
+ "label": [self.text2.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(SEQ2SEQ, collaborative_annotation=True)
+ repository = Seq2seqRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": [self.text1.text, self.text2.text],
+ "user": "all",
+ }
+ ]
+ self.assert_records(repository, expected)
+
+
+class TestIntentDetectionSlotFillingRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.category1 = mommy.make("Category", example=self.example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
+ self.span = mommy.make("Span", example=self.example, user=project.admin, start_offset=0, end_offset=1)
+
+ def test_list(self):
+ project = prepare_project(INTENT_DETECTION_AND_SLOT_FILLING)
+ repository = IntentDetectionSlotFillingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": {
+ "cats": [self.category1.label.text],
+ "entities": [(self.span.start_offset, self.span.end_offset, self.span.label.text)],
+ },
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.text,
+ "label": {
+ "cats": [self.category2.label.text],
+ "entities": [],
+ },
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(INTENT_DETECTION_AND_SLOT_FILLING, collaborative_annotation=True)
+ repository = IntentDetectionSlotFillingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
"label": {
- "cats": [category.label.text],
- "entities": [(span.start_offset, span.end_offset, span.label.text)],
+ "cats": [self.category1.label.text, self.category2.label.text],
+ "entities": [(self.span.start_offset, self.span.end_offset, self.span.label.text)],
},
+ "user": "all",
}
]
- records = list(repository.list())
- self.assertEqual(len(records), len(expected))
- for record, expect in zip(records, expected):
- self.assertEqual(record.data, expect["data"])
- self.assertEqual(record.label["cats"], expect["label"]["cats"])
- self.assertEqual(record.label["entities"], expect["label"]["entities"])
-
-
-class TestRelationExtractionRepository(unittest.TestCase):
- def setUp(self):
- self.project = prepare_project(SEQUENCE_LABELING, use_relation=True)
-
- def test_label_per_user(self):
- from_entity = mommy.make("Span", start_offset=0, end_offset=1, user=self.project.admin)
- to_entity = mommy.make(
- "Span", start_offset=1, end_offset=2, example=from_entity.example, user=self.project.admin
- )
- relation = mommy.make(
- "Relation", from_id=from_entity, to_id=to_entity, example=from_entity.example, user=self.project.admin
- )
- repository = RelationExtractionRepository(self.project.item)
- expected = {
- "admin": {
- "entities": [
- {
- "id": from_entity.id,
- "start_offset": from_entity.start_offset,
- "end_offset": from_entity.end_offset,
- "label": from_entity.label.text,
- },
- {
- "id": to_entity.id,
- "start_offset": to_entity.start_offset,
- "end_offset": to_entity.end_offset,
- "label": to_entity.label.text,
- },
- ],
- "relations": [
- {"id": relation.id, "from_id": from_entity.id, "to_id": to_entity.id, "type": relation.type.text}
+ self.assert_records(repository, expected)
+
+
+class TestSequenceLabelingRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.span1 = mommy.make("Span", example=self.example, user=project.admin, start_offset=0, end_offset=1)
+ self.span2 = mommy.make("Span", example=self.example, user=project.annotator, start_offset=1, end_offset=2)
+
+ def test_list(self):
+ project = prepare_project(SEQUENCE_LABELING)
+ repository = SequenceLabelingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": [(self.span1.start_offset, self.span1.end_offset, self.span1.label.text)],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.text,
+ "label": [(self.span2.start_offset, self.span2.end_offset, self.span2.label.text)],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(SEQUENCE_LABELING, collaborative_annotation=True)
+ repository = SequenceLabelingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.text,
+ "label": [
+ (self.span1.start_offset, self.span1.end_offset, self.span1.label.text),
+ (self.span2.start_offset, self.span2.end_offset, self.span2.label.text),
],
+ "user": "all",
+ }
+ ]
+ self.assert_records(repository, expected)
+
+
+class TestRelationExtractionRepository(TestRepository):
+ def test_list(self):
+ project = prepare_project(SEQUENCE_LABELING, use_relation=True)
+ example = mommy.make("Example", project=project.item, text="example")
+ span1 = mommy.make("Span", example=example, user=project.admin, start_offset=0, end_offset=1)
+ span2 = mommy.make("Span", example=example, user=project.admin, start_offset=1, end_offset=2)
+ relation = mommy.make("Relation", from_id=span1, to_id=span2, example=example, user=project.admin)
+ repository = RelationExtractionRepository(project.item)
+ expected = [
+ {
+ "data": example.text,
+ "label": {
+ "entities": [
+ {
+ "id": span1.id,
+ "start_offset": span1.start_offset,
+ "end_offset": span1.end_offset,
+ "label": span1.label.text,
+ },
+ {
+ "id": span2.id,
+ "start_offset": span2.start_offset,
+ "end_offset": span2.end_offset,
+ "label": span2.label.text,
+ },
+ ],
+ "relations": [
+ {"id": relation.id, "from_id": span1.id, "to_id": span2.id, "type": relation.type.text}
+ ],
+ },
+ "user": project.admin.username,
+ }
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(SEQUENCE_LABELING, collaborative_annotation=True, use_relation=True)
+ example = mommy.make("Example", project=project.item, text="example")
+ span1 = mommy.make("Span", example=example, user=project.admin, start_offset=0, end_offset=1)
+ span2 = mommy.make("Span", example=example, user=project.annotator, start_offset=1, end_offset=2)
+ relation = mommy.make("Relation", from_id=span1, to_id=span2, example=example, user=project.admin)
+ repository = RelationExtractionRepository(project.item)
+ expected = [
+ {
+ "data": example.text,
+ "label": {
+ "entities": [
+ {
+ "id": span1.id,
+ "start_offset": span1.start_offset,
+ "end_offset": span1.end_offset,
+ "label": span1.label.text,
+ },
+ {
+ "id": span2.id,
+ "start_offset": span2.start_offset,
+ "end_offset": span2.end_offset,
+ "label": span2.label.text,
+ },
+ ],
+ "relations": [
+ {"id": relation.id, "from_id": span1.id, "to_id": span2.id, "type": relation.type.text}
+ ],
+ },
+ "user": "all",
+ }
+ ]
+ self.assert_records(repository, expected)
+
+
+class TestSpeech2TextRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.text1 = mommy.make("TextLabel", example=self.example, user=project.admin)
+ self.text2 = mommy.make("TextLabel", example=self.example, user=project.annotator)
+
+ def test_list(self):
+ project = prepare_project(SPEECH2TEXT)
+ repository = Speech2TextRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.filename,
+ "label": [self.text1.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.filename,
+ "label": [self.text2.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(SPEECH2TEXT, collaborative_annotation=True)
+ repository = Speech2TextRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.filename,
+ "label": [self.text1.text, self.text2.text],
+ "user": "all",
+ }
+ ]
+ self.assert_records(repository, expected)
+
+
+class TestFileRepository(TestRepository):
+ def prepare_data(self, project):
+ self.example = mommy.make("Example", project=project.item, text="example")
+ self.category1 = mommy.make("Category", example=self.example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
+
+ def test_list(self):
+ project = prepare_project(IMAGE_CLASSIFICATION)
+ repository = FileRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.filename,
+ "label": [self.category1.label.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.example.filename,
+ "label": [self.category2.label.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected)
+
+ def test_list_on_collaborative_annotation(self):
+ project = prepare_project(IMAGE_CLASSIFICATION, collaborative_annotation=True)
+ repository = FileRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.example.filename,
+ "label": [self.category1.label.text, self.category2.label.text],
+ "user": "all",
}
- }
- actual = repository.label_per_user(from_entity.example)
- self.assertDictEqual(actual, expected)
+ ]
+ self.assert_records(repository, expected)
| Empty export of entity-relation-labeling
How to reproduce the behaviour
---------
- Project - Create - Sequence Labeling
- check `Allow overlapping entity`, `Use relation labeling`, `Count grapheme clusters as one character`, `Share annotations across all users`
- then import data, do some labeling, and export without checking `Export only approved documents`. The export shows an empty result, as below:
```json
{"id": 4, "text": "Terrible customer service.", "relations": [], "entities": []}
```
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: wsl2+ubuntu20.04
* Python Version Used: 3.8, 3.10
* When you install doccano: 3 hours ago
* How did you install doccano (Heroku button etc): first-time pip, then from source code
| https://github.com/doccano/doccano/blob/e8220d22b933407bce45d8ddfc4bba112c3d91ad/backend/data_export/pipeline/repositories.py#L57
This should consider the situation where `label_per_user` is a `Dict[str, Dict]` when exporting entity-relation-labeling data. | 2022-04-02T18:04:19 |
doccano/doccano | 1,770 | doccano__doccano-1770 | [
"1769"
] | 350bc5e95fa459d1dc124a320cb8e34385b73021 | diff --git a/backend/metrics/views.py b/backend/metrics/views.py
--- a/backend/metrics/views.py
+++ b/backend/metrics/views.py
@@ -1,5 +1,6 @@
import abc
+from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
@@ -8,7 +9,7 @@
from examples.models import Example, ExampleState
from label_types.models import CategoryType, LabelType, RelationType, SpanType
from labels.models import Category, Label, Relation, Span
-from projects.models import Member
+from projects.models import Member, Project
from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
@@ -18,7 +19,11 @@ class ProgressAPI(APIView):
def get(self, request, *args, **kwargs):
examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
total = examples.count()
- complete = ExampleState.objects.count_done(examples, user=self.request.user)
+ project = get_object_or_404(Project, pk=self.kwargs["project_id"])
+ if project.collaborative_annotation:
+ complete = ExampleState.objects.count_done(examples)
+ else:
+ complete = ExampleState.objects.count_done(examples, user=self.request.user)
data = {"total": total, "remaining": total - complete, "complete": complete}
return Response(data=data, status=status.HTTP_200_OK)
| diff --git a/backend/metrics/tests.py b/backend/metrics/tests.py
--- a/backend/metrics/tests.py
+++ b/backend/metrics/tests.py
@@ -28,6 +28,41 @@ def test_fetch_progress(self):
self.assertEqual(response.data, {"total": 1, "progress": expected_progress})
+class TestProgressHelper(CRUDMixin):
+ collaborative_annotation = False
+
+ def setUp(self):
+ self.project = prepare_project(DOCUMENT_CLASSIFICATION, collaborative_annotation=self.collaborative_annotation)
+ self.example = make_doc(self.project.item)
+ mommy.make("ExampleState", example=self.example, confirmed_by=self.project.admin)
+ self.url = reverse(viewname="progress", args=[self.project.item.id])
+
+
+class TestProgress(TestProgressHelper):
+ collaborative_annotation = False
+
+ def test_fetch_progress(self):
+ response = self.assert_fetch(self.project.admin, status.HTTP_200_OK)
+ expected = {"total": 1, "remaining": 0, "complete": 1}
+ self.assertEqual(response.data, expected)
+
+ def test_cannot_affect_others_progress(self):
+ for member in self.project.staffs:
+ response = self.assert_fetch(member, status.HTTP_200_OK)
+ expected = {"total": 1, "remaining": 1, "complete": 0}
+ self.assertEqual(response.data, expected)
+
+
+class TestProgressOnCollaborativeAnnotation(TestProgressHelper):
+ collaborative_annotation = True
+
+ def test_fetch_progress(self):
+ for member in self.project.members:
+ response = self.assert_fetch(member, status.HTTP_200_OK)
+ expected = {"total": 1, "remaining": 0, "complete": 1}
+ self.assertEqual(response.data, expected)
+
+
class TestCategoryDistribution(CRUDMixin):
def setUp(self):
self.project = prepare_project(DOCUMENT_CLASSIFICATION)
| Wrong progress in collaborative annotation ('Share annotations across all users')
How to reproduce the behaviour
---------
Progress is shown as individual progress instead of total progress when 'Share annotations across all users' is ticked in the project settings.
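To make the expected behaviour concrete, here is a small, runnable illustration (the tuples stand in for doccano's `ExampleState` rows; the names are illustrative only):
```python
# Self-contained sketch: ExampleState rows modelled as (example_id, confirmed_by)
# tuples. When annotations are shared, any member's confirmation should count.
from typing import Iterable, Optional, Tuple


def count_done(states: Iterable[Tuple[int, str]], user: Optional[str] = None) -> int:
    done = set()
    for example_id, confirmed_by in states:
        if user is None or confirmed_by == user:
            done.add(example_id)
    return len(done)


states = [(1, "alice"), (2, "bob")]
assert count_done(states, user="alice") == 1  # per-user progress (what is shown now)
assert count_done(states) == 2                # shared progress (what is expected)
```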
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: wsl2+ubuntu20.04
* Python Version Used: 3.8
* When you install doccano: 20220403
* How did you install doccano (Heroku button etc): source
| 2022-04-05T19:38:38 |
|
doccano/doccano | 1,783 | doccano__doccano-1783 | [
"1422"
] | f27c42810a9b68d3bf82b697fda3b2c2c8f72b59 | diff --git a/backend/data_export/pipeline/repositories.py b/backend/data_export/pipeline/repositories.py
--- a/backend/data_export/pipeline/repositories.py
+++ b/backend/data_export/pipeline/repositories.py
@@ -17,12 +17,15 @@ def __init__(self, project: Project):
def list(self, export_approved=False) -> Iterator[Record]:
raise NotImplementedError()
+ def create_unlabeled_record(self, example: Example) -> Record:
+ raise NotImplementedError()
+
class FileRepository(BaseRepository):
def list(self, export_approved=False) -> Iterator[Record]:
examples = self.project.examples.all()
if export_approved:
- examples = examples.exclude(annotations_approved_by=None)
+ examples = examples.exclude(states=None)
for example in examples:
label_per_user = self.label_per_user(example)
@@ -43,7 +46,10 @@ def list(self, export_approved=False) -> Iterator[Record]:
# with the user who approved the doc.
# This means I will allow each user to be able to approve the doc.
if len(label_per_user) == 0:
- yield Record(data_id=example.id, data=example.upload_name, label=[], user="unknown", metadata={})
+ yield self.create_unlabeled_record(example)
+
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(data_id=example.id, data=example.upload_name, label=[], user="unknown", metadata=example.meta)
def label_per_user(self, example) -> Dict:
label_per_user = defaultdict(list)
@@ -72,7 +78,7 @@ def docs(self):
def list(self, export_approved=False):
docs = self.docs
if export_approved:
- docs = docs.exclude(annotations_approved_by=None)
+ docs = docs.exclude(states=None)
for doc in docs:
label_per_user = self.label_per_user(doc)
@@ -87,7 +93,10 @@ def list(self, export_approved=False):
# with the user who approved the doc.
# This means I will allow each user to be able to approve the doc.
if len(label_per_user) == 0:
- yield Record(data_id=doc.id, data=doc.text, label=[], user="unknown", metadata={})
+ yield self.create_unlabeled_record(doc)
+
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(data_id=example.id, data=example.text, label=[], user="unknown", metadata=example.meta)
@abc.abstractmethod
def label_per_user(self, doc) -> Dict:
@@ -130,6 +139,15 @@ def docs(self):
"spans__user", "spans__label", "relations__user", "relations__type"
)
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(
+ data_id=example.id,
+ data=example.text,
+ label={"entities": [], "relations": []},
+ user="unknown",
+ metadata=example.meta,
+ )
+
def label_per_user(self, doc) -> Dict:
relation_per_user: Dict = defaultdict(list)
span_per_user: Dict = defaultdict(list)
@@ -186,6 +204,15 @@ def docs(self):
"categories__user", "categories__label", "spans__user", "spans__label"
)
+ def create_unlabeled_record(self, example: Example) -> Record:
+ return Record(
+ data_id=example.id,
+ data=example.text,
+ label={"entities": [], "cats": []},
+ user="unknown",
+ metadata=example.meta,
+ )
+
def label_per_user(self, doc) -> Dict:
category_per_user: Dict[str, List[str]] = defaultdict(list)
span_per_user: Dict[str, List[SpanType]] = defaultdict(list)
| diff --git a/backend/data_export/tests/test_repositories.py b/backend/data_export/tests/test_repositories.py
--- a/backend/data_export/tests/test_repositories.py
+++ b/backend/data_export/tests/test_repositories.py
@@ -23,8 +23,8 @@
class TestRepository(unittest.TestCase):
- def assert_records(self, repository, expected):
- records = list(repository.list())
+ def assert_records(self, repository, expected, confirmed_only=False):
+ records = list(repository.list(export_approved=confirmed_only))
self.assertEqual(len(records), len(expected))
for record, expect in zip(records, expected):
self.assertEqual(record.data, expect["data"])
@@ -34,9 +34,11 @@ def assert_records(self, repository, expected):
class TestTextClassificationRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.category1 = mommy.make("Category", example=self.example, user=project.admin)
- self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
+ self.confirmed_example = mommy.make("Example", project=project.item, text="confirmed")
+ self.category1 = mommy.make("Category", example=self.confirmed_example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.confirmed_example, user=project.annotator)
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(DOCUMENT_CLASSIFICATION)
@@ -44,15 +46,16 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.category1.label.text],
"user": project.admin.username,
},
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.category2.label.text],
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.text, "label": [], "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -62,19 +65,44 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.category1.label.text, self.category2.label.text],
"user": "all",
- }
+ },
+ {
+ "data": self.unconfirmed_example.text,
+ "label": [],
+ "user": "all",
+ },
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(DOCUMENT_CLASSIFICATION)
+ repository = TextClassificationRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.text,
+ "label": [self.category1.label.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.text,
+ "label": [self.category2.label.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestSeq2seqRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.text1 = mommy.make("TextLabel", example=self.example, user=project.admin)
- self.text2 = mommy.make("TextLabel", example=self.example, user=project.annotator)
+ self.confirmed_example = mommy.make("Example", project=project.item, text="confirmed")
+ self.text1 = mommy.make("TextLabel", example=self.confirmed_example, user=project.admin)
+ self.text2 = mommy.make("TextLabel", example=self.confirmed_example, user=project.annotator)
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(SEQ2SEQ)
@@ -82,15 +110,16 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.text1.text],
"user": project.admin.username,
},
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.text2.text],
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.text, "label": [], "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -100,20 +129,45 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [self.text1.text, self.text2.text],
"user": "all",
- }
+ },
+ {
+ "data": self.unconfirmed_example.text,
+ "label": [],
+ "user": "all",
+ },
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(SEQ2SEQ)
+ repository = Seq2seqRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.text,
+ "label": [self.text1.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.text,
+ "label": [self.text2.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestIntentDetectionSlotFillingRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.category1 = mommy.make("Category", example=self.example, user=project.admin)
- self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
- self.span = mommy.make("Span", example=self.example, user=project.admin, start_offset=0, end_offset=1)
+ self.confirmed_example = mommy.make("Example", project=project.item, text="confirmed")
+ self.category1 = mommy.make("Category", example=self.confirmed_example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.confirmed_example, user=project.annotator)
+ self.span = mommy.make("Span", example=self.confirmed_example, user=project.admin, start_offset=0, end_offset=1)
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(INTENT_DETECTION_AND_SLOT_FILLING)
@@ -121,7 +175,7 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": {
"cats": [self.category1.label.text],
"entities": [(self.span.start_offset, self.span.end_offset, self.span.label.text)],
@@ -129,13 +183,14 @@ def test_list(self):
"user": project.admin.username,
},
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": {
"cats": [self.category2.label.text],
"entities": [],
},
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.text, "label": {"cats": [], "entities": []}, "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -145,22 +200,53 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": {
"cats": [self.category1.label.text, self.category2.label.text],
"entities": [(self.span.start_offset, self.span.end_offset, self.span.label.text)],
},
"user": "all",
- }
+ },
+ {"data": self.unconfirmed_example.text, "label": {"cats": [], "entities": []}, "user": "all"},
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(INTENT_DETECTION_AND_SLOT_FILLING)
+ repository = IntentDetectionSlotFillingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.text,
+ "label": {
+ "cats": [self.category1.label.text],
+ "entities": [(self.span.start_offset, self.span.end_offset, self.span.label.text)],
+ },
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.text,
+ "label": {
+ "cats": [self.category2.label.text],
+ "entities": [],
+ },
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestSequenceLabelingRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.span1 = mommy.make("Span", example=self.example, user=project.admin, start_offset=0, end_offset=1)
- self.span2 = mommy.make("Span", example=self.example, user=project.annotator, start_offset=1, end_offset=2)
+ self.confirmed_example = mommy.make("Example", project=project.item, text="confirmed")
+ self.span1 = mommy.make(
+ "Span", example=self.confirmed_example, user=project.admin, start_offset=0, end_offset=1
+ )
+ self.span2 = mommy.make(
+ "Span", example=self.confirmed_example, user=project.annotator, start_offset=1, end_offset=2
+ )
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(SEQUENCE_LABELING)
@@ -168,15 +254,16 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [(self.span1.start_offset, self.span1.end_offset, self.span1.label.text)],
"user": project.admin.username,
},
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [(self.span2.start_offset, self.span2.end_offset, self.span2.label.text)],
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.text, "label": [], "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -186,28 +273,49 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.text,
+ "data": self.confirmed_example.text,
"label": [
(self.span1.start_offset, self.span1.end_offset, self.span1.label.text),
(self.span2.start_offset, self.span2.end_offset, self.span2.label.text),
],
"user": "all",
- }
+ },
+ {"data": self.unconfirmed_example.text, "label": [], "user": "all"},
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(SEQUENCE_LABELING)
+ repository = SequenceLabelingRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.text,
+ "label": [(self.span1.start_offset, self.span1.end_offset, self.span1.label.text)],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.text,
+ "label": [(self.span2.start_offset, self.span2.end_offset, self.span2.label.text)],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestRelationExtractionRepository(TestRepository):
def test_list(self):
project = prepare_project(SEQUENCE_LABELING, use_relation=True)
- example = mommy.make("Example", project=project.item, text="example")
- span1 = mommy.make("Span", example=example, user=project.admin, start_offset=0, end_offset=1)
- span2 = mommy.make("Span", example=example, user=project.admin, start_offset=1, end_offset=2)
- relation = mommy.make("Relation", from_id=span1, to_id=span2, example=example, user=project.admin)
+ confirmed_example = mommy.make("Example", project=project.item, text="example")
+ span1 = mommy.make("Span", example=confirmed_example, user=project.admin, start_offset=0, end_offset=1)
+ span2 = mommy.make("Span", example=confirmed_example, user=project.admin, start_offset=1, end_offset=2)
+ relation = mommy.make("Relation", from_id=span1, to_id=span2, example=confirmed_example, user=project.admin)
+ mommy.make("ExampleState", example=confirmed_example, confirmed_by=project.admin)
+ unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
repository = RelationExtractionRepository(project.item)
expected = [
{
- "data": example.text,
+ "data": confirmed_example.text,
"label": {
"entities": [
{
@@ -228,7 +336,8 @@ def test_list(self):
],
},
"user": project.admin.username,
- }
+ },
+ {"data": unconfirmed_example.text, "label": {"entities": [], "relations": []}, "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -266,12 +375,50 @@ def test_list_on_collaborative_annotation(self):
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(SEQUENCE_LABELING, use_relation=True)
+ confirmed_example = mommy.make("Example", project=project.item, text="example")
+ span1 = mommy.make("Span", example=confirmed_example, user=project.admin, start_offset=0, end_offset=1)
+ span2 = mommy.make("Span", example=confirmed_example, user=project.admin, start_offset=1, end_offset=2)
+ relation = mommy.make("Relation", from_id=span1, to_id=span2, example=confirmed_example, user=project.admin)
+ mommy.make("ExampleState", example=confirmed_example, confirmed_by=project.admin)
+ mommy.make("Example", project=project.item, text="unconfirmed")
+ repository = RelationExtractionRepository(project.item)
+ expected = [
+ {
+ "data": confirmed_example.text,
+ "label": {
+ "entities": [
+ {
+ "id": span1.id,
+ "start_offset": span1.start_offset,
+ "end_offset": span1.end_offset,
+ "label": span1.label.text,
+ },
+ {
+ "id": span2.id,
+ "start_offset": span2.start_offset,
+ "end_offset": span2.end_offset,
+ "label": span2.label.text,
+ },
+ ],
+ "relations": [
+ {"id": relation.id, "from_id": span1.id, "to_id": span2.id, "type": relation.type.text}
+ ],
+ },
+ "user": project.admin.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestSpeech2TextRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.text1 = mommy.make("TextLabel", example=self.example, user=project.admin)
- self.text2 = mommy.make("TextLabel", example=self.example, user=project.annotator)
+ self.confirmed_example = mommy.make("Example", project=project.item)
+ self.text1 = mommy.make("TextLabel", example=self.confirmed_example, user=project.admin)
+ self.text2 = mommy.make("TextLabel", example=self.confirmed_example, user=project.annotator)
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(SPEECH2TEXT)
@@ -279,15 +426,16 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.text1.text],
"user": project.admin.username,
},
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.text2.text],
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.upload_name, "label": [], "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -297,19 +445,40 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.text1.text, self.text2.text],
"user": "all",
- }
+ },
+ {"data": self.unconfirmed_example.upload_name, "label": [], "user": "all"},
]
self.assert_records(repository, expected)
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(SPEECH2TEXT)
+ repository = Speech2TextRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.upload_name,
+ "label": [self.text1.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.upload_name,
+ "label": [self.text2.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
+
class TestFileRepository(TestRepository):
def prepare_data(self, project):
- self.example = mommy.make("Example", project=project.item, text="example")
- self.category1 = mommy.make("Category", example=self.example, user=project.admin)
- self.category2 = mommy.make("Category", example=self.example, user=project.annotator)
+ self.confirmed_example = mommy.make("Example", project=project.item, text="example")
+ self.category1 = mommy.make("Category", example=self.confirmed_example, user=project.admin)
+ self.category2 = mommy.make("Category", example=self.confirmed_example, user=project.annotator)
+ mommy.make("ExampleState", example=self.confirmed_example, confirmed_by=project.admin)
+ self.unconfirmed_example = mommy.make("Example", project=project.item, text="unconfirmed")
def test_list(self):
project = prepare_project(IMAGE_CLASSIFICATION)
@@ -317,15 +486,16 @@ def test_list(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.category1.label.text],
"user": project.admin.username,
},
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.category2.label.text],
"user": project.annotator.username,
},
+ {"data": self.unconfirmed_example.upload_name, "label": [], "user": "unknown"},
]
self.assert_records(repository, expected)
@@ -335,9 +505,28 @@ def test_list_on_collaborative_annotation(self):
self.prepare_data(project)
expected = [
{
- "data": self.example.upload_name,
+ "data": self.confirmed_example.upload_name,
"label": [self.category1.label.text, self.category2.label.text],
"user": "all",
- }
+ },
+ {"data": self.unconfirmed_example.upload_name, "label": [], "user": "all"},
]
self.assert_records(repository, expected)
+
+ def test_list_confirmed_example_only(self):
+ project = prepare_project(IMAGE_CLASSIFICATION)
+ repository = FileRepository(project.item)
+ self.prepare_data(project)
+ expected = [
+ {
+ "data": self.confirmed_example.upload_name,
+ "label": [self.category1.label.text],
+ "user": project.admin.username,
+ },
+ {
+ "data": self.confirmed_example.upload_name,
+ "label": [self.category2.label.text],
+ "user": project.annotator.username,
+ },
+ ]
+ self.assert_records(repository, expected, confirmed_only=True)
| Exporting data yields nothing if I choose only approved documents.
If I choose only approved documents, the zip file contains nothing.
If I don't choose it, the zip file contains "all.json".
But I have already checked (approved) those texts.
| Would you share your environment details? Thank you!
Same here, on a sequence labeling project.
I'm new to Doccano and had this issue too.
I found that since https://github.com/doccano/doccano/pull/1402/ the "check" button toggles documents as "confirmed"; it doesn't approve them anymore. So in the database, the approver id remains empty, and there are no documents to export. I didn't find any way to approve documents anymore; this is sort of disconnected from the frontend.
I'm not sure about the new document-state workflow mentioned in the discussions. Do we need another "approve" button in addition to the "complete" button? Or can it be done in one step? Or maybe a new export setting to allow exports of confirmed documents?
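A rough way to see this mismatch from `python manage.py shell` (a sketch only: module paths and field names vary across doccano versions, and in older versions these models live under `api.models`):
```python
# Sketch: find examples that were "confirmed" but never "approved".
# Model and field names follow the doccano backend discussed above;
# adjust the import path to your version (e.g. `from api.models import ...`).
from examples.models import Example, ExampleState

confirmed_ids = ExampleState.objects.values_list("example_id", flat=True)
orphaned = Example.objects.filter(id__in=confirmed_ids, annotations_approved_by=None)
print(orphaned.count())  # confirmed examples whose approver id is still empty
```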
Is confirmed used for anything aside from filtering? If not, the two should probably just be merged into one status/flag.
I've just run into this issue myself; I worked around it with this minor change to sync `annotations_approved_by` with the example state:
```
diff --git a/backend/api/views/example_state.py b/backend/api/views/example_state.py
index 38a82217..57ea018d 100644
--- a/backend/api/views/example_state.py
+++ b/backend/api/views/example_state.py
@@ -1,8 +1,9 @@
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from ..permissions import IsInProjectOrAdmin
from ..serializers import ExampleStateSerializer
@@ -24,8 +25,42 @@ class ExampleStateList(generics.ListCreateAPIView):
def perform_create(self, serializer):
queryset = self.get_queryset()
+ example_id = self.kwargs['example_id']
+ example = Example.objects.get(pk=example_id)
+
if queryset.exists():
+ example.annotations_approved_by = None
+ example.save()
queryset.delete()
else:
example = get_object_or_404(Example, pk=self.kwargs['example_id'])
+ example.annotations_approved_by = self.request.user
+ example.save()
serializer.save(example=example, confirmed_by=self.request.user)
```
I also have the same problem. I have around 30% of the corpus annotated and checked, but if I choose to export only the confirmed documents I only get an empty, 22-byte all.json file. Running Doccano as installed by Docker Compose on an Apple M1 chip Mac.
Has there been any progress on fixing this bug? Or is there a way for us to manually edit the DB to convert "confirmed" to "approved"?
@daleevans Can we use an adapted version of your code to auto-approve all documents marked as confirmed/done by a specific user?
It would already be helpful just to get a list of all document ids that were marked as confirmed/done by a specific user. Then we could filter the all.json manually after exporting.
Does anyone know how to query the database to get such a list?
@mnschmit I ran into this problem and have written some code to extract all ids of examples that were confirmed by a specific user. Take a look:
```
from doccano_api_client import DoccanoClient
client = DoccanoClient(DOCCANO_URL, USERNAME, PASSWORD)
# Get list of all example ids
ids = [example["id"] for example in client.get_examples(4, {"limit": [1200], "offset": [0]})["results"]]
# Query the database for the example states
example_states = [client.get_example_states(4, idx) for idx in ids]
# Filter the states based on if they were confirmed by the desired user and store the approved ids in a list
user_id = 1
confirmed_idx = [state["results"][0]["example"] for state in example_states if state["results"] and state["results"][0]["confirmed_by"] == user_id]
```
`confirmed_idx` is then a list of the example ids confirmed by the specified user.
Note: this solution requires the most recent version of [Doccano Client](https://github.com/doccano/doccano-client) (clone the repo and pip install it; the release on PyPI is outdated).
@wpnbos Thank you for sharing that approach! I didn't know about the doccano client! | 2022-04-14T04:44:29 |
doccano/doccano | 1,842 | doccano__doccano-1842 | [
"1762"
] | c146a4ccad0eca63304bdc278281b21d38392944 | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -66,7 +66,7 @@ def run_on_windows(args):
from config.wsgi import application
- serve(application, port=args.port)
+ serve(application, port=args.port, threads=args.workers)
def command_db_init(args):
| diff --git a/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js b/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
--- a/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
+++ b/frontend/test/unit/components/tasks/toolbar/forms/formGuideline.spec.js
@@ -11,7 +11,7 @@ const factory = () => {
propsData: {
guidelineText: 'Hello'
},
- mocks:{ $t }
+ mocks: { $t }
})
}
| Doccano is not importing any text data
Hello,
Doccano is not importing any text data. When importing text data, the browser keeps loading as shown below:

The command-line terminal shows the following:
```
Starting server with port 8000.
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 2
Bad Request: /v1/auth/login/
WARNING:django.request:Bad Request: /v1/auth/login/
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 2
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 1
WARNING:waitress.queue:Task queue depth is 1
```
Your Environment
---------
* Operating System: Windows 10
* Python Version Used: 3.10
* When you install doccano: Few days back
* How did you install doccano (Heroku button etc): Command Line
| Related to:
- [How to find the cause of "task queue depth" warnings from waitress?](https://stackoverflow.com/questions/55857058/how-to-find-the-cause-of-task-queue-depth-warnings-from-waitress)
Currently, we can't pass the number of workers to the server. We need to change the code to pass `threads=args.workers`.
- [Arguments to waitress.serve](https://docs.pylonsproject.org/projects/waitress/en/stable/arguments.html)
https://github.com/doccano/doccano/blob/350bc5e95fa459d1dc124a320cb8e34385b73021/backend/cli.py#L62-L67
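For reference, here is a minimal sketch of passing an explicit thread count to waitress (the WSGI app below is a placeholder, not doccano's actual application object):
```python
# Minimal waitress example: `threads` controls the worker-thread pool size
# (waitress defaults to 4 threads). The app below is a stand-in WSGI callable.
from waitress import serve


def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok"]


serve(app, port=8000, threads=8)  # e.g. 8 threads instead of the default
```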
Facing the same issue. May I know how to tackle this? Or do I just need to wait for the developers to fix the bug? @Hironsan
The following may solve the problem:
https://github.com/doccano/doccano/issues/1762#issuecomment-1087141902
Unfortunately, I can't reproduce this problem.
> The following may solve the problem: [#1762 (comment)](https://github.com/doccano/doccano/issues/1762#issuecomment-1087141902)
>
> Unfortunately, I can't reproduce this problem.
Where should I paste the code? My Doccano folder doesn't have cli.py.

```bash
> which doccano
/venv/bin/doccano
> ls /venv/lib/python3.8/site-packages/backend/cli.py
```
Do you mean I should paste the code above into `cli.py`? At which line should I insert `threads=args.workers`?

This line:
```python
serve(application, port=args.port, threads=args.workers)
```
> This line:
>
> ```python
> serve(application, port=args.port, threads=args.workers)
> ```
I've altered & saved the code, but it did not solve the loading issue.

Can you increase the number of workers?
```bash
doccano webserver --workers=8
```
> Can you increase the number of workers?
>
> ```shell
> doccano webserver --workers=8
> ```
Yup, I have increased it, but the issue still persisted. BTW, is it normal that there are no updates on the command prompt when running Doccano?

> BTW, is it normal that there are no updates on the command prompt when running Doccano?
Yes, because the worker output is not printed.
By the way, did you run the `doccano task` command?
- [Install with pip](https://doccano.github.io/doccano/install-and-upgrade-doccano/#install-with-pip)
> > BTW, is it normal that there are no updates on the command prompt when running Doccano?
>
> Yes, because the worker output is not printed.
>
> By the way, did you run the `doccano task` command?
>
> * [Install with pip](https://doccano.github.io/doccano/install-and-upgrade-doccano/#install-with-pip)
No, I didn't. I just ran it after you told me, and now everything seems to work perfectly! Thank you!
Good! You're welcome. | 2022-05-27T02:28:54 |
doccano/doccano | 1,907 | doccano__doccano-1907 | [
"1778"
] | aa78d8feebb424b943ce52ef9209dbc6eb13f202 | diff --git a/backend/config/settings/heroku.py b/backend/config/settings/heroku.py
--- a/backend/config/settings/heroku.py
+++ b/backend/config/settings/heroku.py
@@ -2,4 +2,4 @@
from .base import * # noqa: F401,F403
-django_heroku.settings(locals(), test_runner=False)
+django_heroku.settings(locals(), test_runner=False, staticfiles=False)
| Cannot access Django admin panel in a Heroku deployment
How to reproduce the behaviour
---------
The FAQ describes how to [create a user via the Django admin panel](https://github.com/doccano/doccano/blob/master/docs/faq.md#how-to-create-a-user) for a locally hosted Doccano. When run locally, I have no problem reaching the admin panel at `http://localhost:8000/admin/`; on Heroku, however, it is not working.
I have tried to reach it at:
- `https://mydeployment.herokuapp.com/admin/`
- `https://mydeployment.herokuapp.com/admin/login`
- `https://mydeployment.herokuapp.com/admin/login/`
- `http://mydeployment.herokuapp.com/admin/`
Those URLs all result in a `500 Internal Server Error`.
Am I missing something here, or is this perhaps a bug?
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: -
* Python Version Used: -
* When did you install doccano: A few days ago
* How did you install doccano (Heroku button etc): Heroku button
| Having the exact same problem, and I cannot see anything in the logs as it is an internal error.
Same problem here!
I'm having the same issue. Has anyone here found a solution?
Same problem, does anybody have a solution?
@leugh I found a very simple solution.
The devs for doccano are either extremely busy or don't really care, so [lighttag](https://www.lighttag.io) is a great alternative; I think there's a limit of around 5000 annotations before you have to pay, but it works great for smaller datasets.
Same problem here.
Probably need to set [heroku.py](https://github.com/doccano/doccano/blob/master/backend/config/settings/heroku.py) instead of [production.py](https://github.com/doccano/doccano/blob/master/backend/config/settings/production.py) in [Dockerfile.heroku](https://github.com/doccano/doccano/blob/master/docker/Dockerfile.heroku) and fix some settings. | 2022-07-14T22:34:58 |
|
doccano/doccano | 1,958 | doccano__doccano-1958 | [
"1241"
] | 55ff41b886242497419304ce9bec0afdb9ddf04c | diff --git a/backend/config/urls.py b/backend/config/urls.py
--- a/backend/config/urls.py
+++ b/backend/config/urls.py
@@ -15,6 +15,7 @@
"""
import os
import re
+from pathlib import Path
from django.conf import settings
from django.contrib import admin
@@ -36,6 +37,7 @@
urlpatterns = []
if settings.DEBUG or os.environ.get("STANDALONE", False):
+ static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"
# For showing images and audios in the case of pip and Docker.
urlpatterns.append(
re_path(
@@ -44,6 +46,8 @@
{"document_root": settings.MEDIA_ROOT},
)
)
+ # For showing favicon on the case of pip and Docker.
+ urlpatterns.append(path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}))
urlpatterns += [
path("admin/", admin.site.urls),
| [Bug report] Static files are not copied on pip installation
How to reproduce the behaviour
---------
Seems like (some?) static files are not copied on a pip installation.
For instance, `http://site.com/favicon.ico` is available on a Docker Compose installation, but it is 404'd on a pip installation.
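One plausible fix, sketched below, is to route the favicon through Django's static `serve` view in the standalone (pip/Docker) case (the `client/dist` location is an assumption):
```python
# Sketch: serve favicon.ico via Django's static `serve` view.
# The dist-directory path below is an assumption, not doccano's exact layout.
from pathlib import Path

from django.urls import path
from django.views.static import serve

static_dir = Path(__file__).resolve().parent.parent / "client" / "dist"

urlpatterns = [
    path("favicon.ico", serve, {"document_root": static_dir, "path": "favicon.ico"}),
]
```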
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: CentOS 8.3
* Python Version Used: 3.8.8
* When you install doccano: v1.2.1
* How did you install doccano (Heroku button etc): pip
| 2022-08-19T01:07:36 |
||
doccano/doccano | 1,985 | doccano__doccano-1985 | [
"687"
] | 5007ae5d8e60ba67cfc63aa0f98adf5def559e7a | diff --git a/backend/projects/serializers.py b/backend/projects/serializers.py
--- a/backend/projects/serializers.py
+++ b/backend/projects/serializers.py
@@ -49,6 +49,13 @@ class Meta:
class ProjectSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True, required=False)
+ author = serializers.SerializerMethodField()
+
+ @classmethod
+ def get_author(cls, instance):
+ if instance.created_by:
+ return instance.created_by.username
+ return ""
class Meta:
model = Project
@@ -58,9 +65,10 @@ class Meta:
"description",
"guideline",
"project_type",
+ "created_at",
"updated_at",
"random_order",
- "created_by",
+ "author",
"collaborative_annotation",
"single_class_classification",
"is_text_project",
@@ -71,7 +79,9 @@ class Meta:
"tags",
]
read_only_fields = (
+ "created_at",
"updated_at",
+ "author",
"is_text_project",
"can_define_label",
"can_define_relation",
diff --git a/backend/projects/views/project.py b/backend/projects/views/project.py
--- a/backend/projects/views/project.py
+++ b/backend/projects/views/project.py
@@ -13,6 +13,8 @@ class ProjectList(generics.ListCreateAPIView):
serializer_class = ProjectPolymorphicSerializer
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
search_fields = ("name", "description")
+ ordering_fields = ["name", "created_at", "created_by", "project_type"]
+ ordering = ["-created_at"]
def get_permissions(self):
if self.request.method == "GET":
| [Feature Request] Allow adding more metadata to a project
Feature description
---------
Currently, we have many annotation projects in doccano.
However, it is not easy to find the right project, because the only information shown for a project is its name.
- It would be good if the user could add more metadata to a project, such as the creation date, the creating user, and a description. All of that metadata could be shown on the project list page to help the user find the project.
- The metadata for a project should be modifiable. For example, we created a project with a bad name such as "DocumentationClassification-1", and we can't change the name.
- Some way to search, sort, or filter the projects? For example, sort the projects by creation date, or only show the projects created by a given user (see the sketch after this list).
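A hedged sketch of how a Django REST Framework list view could support the search/sort part (import paths and field names are assumptions, not doccano's actual code):
```python
# Illustrative DRF view: searchable and sortable project list.
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics

from projects.models import Project                 # assumed module path
from projects.serializers import ProjectSerializer  # assumed module path


class ProjectList(generics.ListAPIView):
    """List projects; supports ?search=... and ?ordering=... query params."""

    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    search_fields = ("name", "description")                 # e.g. ?search=news
    ordering_fields = ["name", "created_at", "created_by"]  # e.g. ?ordering=-created_at
    ordering = ["-created_at"]                              # default: newest first
```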
| 2022-09-05T00:51:20 |
||
doccano/doccano | 1,989 | doccano__doccano-1989 | [
"857"
] | a670e759f52dcbcc873d5678b8b3ebfa681c11da | diff --git a/backend/api/management/commands/create_admin.py b/backend/api/management/commands/create_admin.py
--- a/backend/api/management/commands/create_admin.py
+++ b/backend/api/management/commands/create_admin.py
@@ -13,9 +13,17 @@ def handle(self, *args, **options):
password = options.get("password")
username = options.get("username")
- if password and not username:
+ if not username:
+ self.stderr.write("Error: Blank username isn't allowed.")
raise CommandError("--username is required if specifying --password")
+ if not password:
+ self.stderr.write("Error: Blank password isn't allowed.")
+ raise CommandError("--password is required")
+
+ if password == "password":
+ self.stdout.write(self.style.WARNING("Warning: You should change the default password."))
+
try:
super().handle(*args, **options)
except Exception as err:
@@ -24,10 +32,10 @@ def handle(self, *args, **options):
else:
raise
- if password:
- database = options.get("database")
- db = self.UserModel._default_manager.db_manager(database)
- user = db.get(username=username)
- user.set_password(password)
- self.stderr.write(f"Setting password for User {username}.")
- user.save()
+ database = options.get("database")
+ db = self.UserModel._default_manager.db_manager(database)
+ user = db.get(username=username)
+ user.set_password(password)
+ message = f"Setting password for User {username}."
+ self.stdout.write(self.style.SUCCESS(message))
+ user.save()
| diff --git a/backend/api/tests/test_commands.py b/backend/api/tests/test_commands.py
new file mode 100644
--- /dev/null
+++ b/backend/api/tests/test_commands.py
@@ -0,0 +1,72 @@
+from unittest.mock import MagicMock
+
+from django.contrib.auth import get_user_model
+from django.core.management import CommandError
+from django.test import TestCase
+
+from api.management.commands.create_admin import Command
+
+
+class TestCreateAdminCommand(TestCase):
+ def test_can_create_user(self):
+ mock_out = MagicMock()
+ command = Command(stdout=mock_out)
+ command.handle(
+ username="user",
+ password="whoami",
+ email="[email protected]",
+ database="default",
+ interactive=False,
+ verbosity=0,
+ )
+ self.assertEqual(get_user_model().objects.count(), 1)
+ mock_out.write.assert_called_once_with("Setting password for User user.\n")
+
+ def test_raise_error_if_username_is_not_given(self):
+ mock_err = MagicMock()
+ command = Command(stderr=mock_err)
+ with self.assertRaises(CommandError):
+ command.handle(
+ password="whoami", email="[email protected]", database="default", interactive=False, verbosity=0
+ )
+ mock_err.write.assert_called_once_with("Error: Blank username isn't allowed.\n")
+
+ def test_raise_error_if_password_is_not_given(self):
+ mock_err = MagicMock()
+ command = Command(stderr=mock_err)
+ with self.assertRaises(CommandError):
+ command.handle(
+ username="user", email="[email protected]", database="default", interactive=False, verbosity=0
+ )
+ mock_err.write.assert_called_once_with("Error: Blank password isn't allowed.\n")
+
+ def test_warn_default_password(self):
+ mock_out = MagicMock()
+ command = Command(stdout=mock_out)
+ command.handle(
+ username="user",
+ password="password",
+ email="[email protected]",
+ database="default",
+ interactive=False,
+ verbosity=0,
+ )
+ self.assertEqual(get_user_model().objects.count(), 1)
+ self.assertEqual(mock_out.write.call_count, 2)
+ mock_out.write.assert_any_call("Warning: You should change the default password.\n")
+ mock_out.write.assert_any_call("Setting password for User user.\n")
+
+ def test_warn_duplicate_username(self):
+ get_user_model().objects.create(username="admin", password="pass")
+ mock_err = MagicMock()
+ command = Command(stderr=mock_err)
+ command.handle(
+ username="admin",
+ password="whoami",
+ email="[email protected]",
+ database="default",
+ interactive=False,
+ verbosity=0,
+ )
+ self.assertEqual(get_user_model().objects.count(), 1)
+ mock_err.write.assert_called_once_with("User admin already exists.\n")
| [Proposal] Warn and/or fail if default admin's password hasn't been changed
Feature description
---------
Proposal: warn and/or fail if default `admin`'s password hasn't been changed.
| Only for production env.
Related: #748. | 2022-09-08T07:47:16 |
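A hedged sketch, mirroring the test patch above and assuming an already-initialized Django environment, of how the patched command now behaves:

```python
from django.core.management import CommandError, call_command

# Omitting --password (or --username) now raises CommandError instead of
# silently creating an admin with default credentials.
try:
    call_command("create_admin", username="admin", email="[email protected]",
                 interactive=False)
except CommandError as err:
    print(err)  # "--password is required"

# The literal default password still works, but now emits a warning.
call_command("create_admin", username="admin", password="password",
             email="[email protected]", interactive=False)
```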
doccano/doccano | 2,077 | doccano__doccano-2077 | [
"2067"
] | 2c9e6818762b6629ef89962d9220cc3ee115bd5e | diff --git a/backend/config/settings/base.py b/backend/config/settings/base.py
--- a/backend/config/settings/base.py
+++ b/backend/config/settings/base.py
@@ -230,8 +230,9 @@
ALLOWED_HOSTS = ["*"]
if DEBUG:
- CORS_ORIGIN_WHITELIST = ("http://127.0.0.1:3000", "http://0.0.0.0:3000", "http://localhost:3000")
- CSRF_TRUSTED_ORIGINS = CORS_ORIGIN_WHITELIST
+ CORS_ORIGIN_ALLOW_ALL = True
+ CSRF_TRUSTED_ORIGINS = ["http://127.0.0.1:3000", "http://0.0.0.0:3000", "http://localhost:3000"]
+ CSRF_TRUSTED_ORIGINS += env.list("CSRF_TRUSTED_ORIGINS", [])
# Batch size for importing data
IMPORT_BATCH_SIZE = env.int("IMPORT_BATCH_SIZE", 1000)
diff --git a/backend/config/settings/development.py b/backend/config/settings/development.py
--- a/backend/config/settings/development.py
+++ b/backend/config/settings/development.py
@@ -1,8 +1,6 @@
from .base import * # noqa: F403
MIDDLEWARE.append("api.middleware.RangesMiddleware") # noqa: F405
-CORS_ORIGIN_WHITELIST = ("http://127.0.0.1:3000", "http://0.0.0.0:3000", "http://localhost:3000")
-CSRF_TRUSTED_ORIGINS = CORS_ORIGIN_WHITELIST
# LOGGING = {
# 'version': 1,
# 'handlers': {
| CSRF

| Would you write your environment? Thank you!
> Would you write your environment? Thank you!
environment:
centos
doccano:1.6.0
solved: https://blog.csdn.net/qq_23953717/article/details/128084659?csdn_share_tail=%7B%22type%22%3A%22blog%22%2C%22rType%22%3A%22article%22%2C%22rId%22%3A%22128084659%22%2C%22source%22%3A%22qq_23953717%22%7D
I would flag this as a duplicate of #2062 both for bug report and solution | 2022-12-02T04:12:24 |
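With the patch, deployments behind a different origin can extend the trusted origins through an environment variable instead of editing settings. A settings-level sketch; the example origin is hypothetical, and the final line mirrors the patched settings code:

```python
import environ

env = environ.Env()
# e.g. export CSRF_TRUSTED_ORIGINS=https://doccano.example.com  (hypothetical origin)
CSRF_TRUSTED_ORIGINS = ["http://127.0.0.1:3000", "http://0.0.0.0:3000", "http://localhost:3000"]
CSRF_TRUSTED_ORIGINS += env.list("CSRF_TRUSTED_ORIGINS", [])  # mirrors the patched line
```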
|
doccano/doccano | 2,089 | doccano__doccano-2089 | [
"2080"
] | 3b79a67ed0e435060dde733c5cabae9535cc9711 | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -155,7 +155,7 @@ def main():
# Create a parser for task queue.
parser_queue = subparsers.add_parser("task", help="see `task -h`")
parser_queue.add_argument("--concurrency", type=int, default=2, help="concurrency")
- parser_queue.add_argument("--env_file", type=str, default="", help="read in a file of environment variables")
+ parser_queue.add_argument("--env_file", type=str, help="read in a file of environment variables")
parser_queue.set_defaults(handler=command_run_task_queue)
parser_flower = subparsers.add_parser("flower", help="see `flower -h`")
| data task error
* Operating System: Ubuntu 22
* Python Version Used: 3.9
* When you install doccano: 2022-12-04
* How did you install doccano (Heroku button etc): pip install doccano
<img width="732" alt="image" src="https://user-images.githubusercontent.com/43643599/205482588-64111fa6-23bc-4c19-ade7-a7fb4532b177.png">
| Would you write your environment? Thank you!
same error with miniconda environment | 2022-12-08T12:01:22 |
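The screenshot's exact traceback isn't recoverable here, but the option this one-line patch touches is easy to sketch. A hedged illustration of why the empty-string default was fragile; the truthiness guard shown was added to cli.py later (see #2246 further below):

```python
from pathlib import Path

Path("").is_file()   # False: with the old "" default, env loading was silently skipped
# Path(None)         # would raise TypeError, hence the extra guard added later

env_file = None      # the new argparse default from this patch
if env_file and Path(env_file).is_file():
    print("reading", env_file)
```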
|
doccano/doccano | 2,099 | doccano__doccano-2099 | [
"2039"
] | 700f08ffc4f350f2f5405a93bf1ec836c3bcab0f | diff --git a/backend/data_import/pipeline/label.py b/backend/data_import/pipeline/label.py
--- a/backend/data_import/pipeline/label.py
+++ b/backend/data_import/pipeline/label.py
@@ -142,6 +142,6 @@ def create(self, user, example: Example, types: LabelTypes, **kwargs):
user=user,
example=example,
type=types[self.type],
- from_id=kwargs["id_to_span"][self.from_id],
- to_id=kwargs["id_to_span"][self.to_id],
+ from_id=kwargs["id_to_span"][(self.from_id, str(self.example_uuid))],
+ to_id=kwargs["id_to_span"][(self.to_id, str(self.example_uuid))],
)
diff --git a/backend/data_import/pipeline/labels.py b/backend/data_import/pipeline/labels.py
--- a/backend/data_import/pipeline/labels.py
+++ b/backend/data_import/pipeline/labels.py
@@ -1,6 +1,6 @@
import abc
from itertools import groupby
-from typing import Dict, List
+from typing import Dict, List, Tuple
from .examples import Examples
from .label import Label
@@ -70,11 +70,11 @@ def clean(self, project: Project):
self.labels = spans
@property
- def id_to_span(self) -> Dict[int, SpanModel]:
- span_uuids = [str(label.uuid) for label in self.labels]
- spans = SpanModel.objects.filter(uuid__in=span_uuids)
+ def id_to_span(self) -> Dict[Tuple[int, str], SpanModel]:
+ uuids = [str(span.uuid) for span in self.labels]
+ spans = SpanModel.objects.filter(uuid__in=uuids)
uuid_to_span = {span.uuid: span for span in spans}
- return {span.id: uuid_to_span[span.uuid] for span in self.labels}
+ return {(span.id, str(span.example_uuid)): uuid_to_span[span.uuid] for span in self.labels}
class Texts(Labels):
| diff --git a/backend/data_import/tests/test_label.py b/backend/data_import/tests/test_label.py
--- a/backend/data_import/tests/test_label.py
+++ b/backend/data_import/tests/test_label.py
@@ -25,7 +25,7 @@ class TestLabel(TestCase):
def setUp(self):
self.project = prepare_project(self.task)
self.user = self.project.admin
- self.example = mommy.make("Example", project=self.project.item)
+ self.example = mommy.make("Example", project=self.project.item, text="hello world")
class TestCategoryLabel(TestLabel):
@@ -166,12 +166,12 @@ def test_create_type(self):
self.assertEqual(relation_type.text, "A")
def test_create(self):
- relation = RelationLabel(type="A", from_id=0, to_id=1, example_uuid=uuid.uuid4())
+ relation = RelationLabel(type="A", from_id=0, to_id=1, example_uuid=self.example.uuid)
types = MagicMock()
types.__getitem__.return_value = mommy.make(RelationType, project=self.project.item)
id_to_span = {
- 0: mommy.make(SpanModel, start_offset=0, end_offset=1),
- 1: mommy.make(SpanModel, start_offset=2, end_offset=3),
+ (0, str(self.example.uuid)): mommy.make(SpanModel, start_offset=0, end_offset=1, example=self.example),
+ (1, str(self.example.uuid)): mommy.make(SpanModel, start_offset=2, end_offset=3, example=self.example),
}
relation_model = relation.create(self.user, self.example, types, id_to_span=id_to_span)
self.assertIsInstance(relation_model, RelationModel)
diff --git a/backend/data_import/tests/test_labels.py b/backend/data_import/tests/test_labels.py
--- a/backend/data_import/tests/test_labels.py
+++ b/backend/data_import/tests/test_labels.py
@@ -146,7 +146,7 @@ def setUp(self):
self.project = prepare_project(SEQUENCE_LABELING, use_relation=True)
self.user = self.project.admin
example_uuid = uuid.uuid4()
- example = mommy.make("Example", project=self.project.item, uuid=example_uuid)
+ example = mommy.make("Example", project=self.project.item, uuid=example_uuid, text="hello world")
from_span = mommy.make("Span", example=example, start_offset=0, end_offset=1)
to_span = mommy.make("Span", example=example, start_offset=2, end_offset=3)
labels = [
@@ -154,7 +154,7 @@ def setUp(self):
]
self.relations = Relations(labels, self.types)
self.spans = MagicMock()
- self.spans.id_to_span = {from_span.id: from_span, to_span.id: to_span}
+ self.spans.id_to_span = {(from_span.id, str(example_uuid)): from_span, (to_span.id, str(example_uuid)): to_span}
self.examples = MagicMock()
self.examples.__getitem__.return_value = example
self.examples.__contains__.return_value = True
| Broken: Importing and Exporting SequenceLabeling projects with relations
How to reproduce the behaviour
---------
Your Environment
---------
* Operating System: Docker
* Python Version Used: 3.8
* When you install doccano: 11/1/22
* How did you install doccano (Heroku button etc): docker-compose
I observed issues with the UI when interacting with relation labels. I am able to create a relation label between two span labels in the UI; however, the relations array gets exported empty when going through the Export Dataset -> JSONL(relation) path. Furthermore, issues occur when trying to import relations as well. The import dataset flow only takes one "Column Label" field. When that is set to "label", all of the span label and relation label info is uploaded as metadata.

If the "Column Label" field is set to "entities" the span labels are imported and only the relation label data is uploaded as metadata.

The first goal would be for the export process to export in the format displayed when you select the JSONL(relation) option from Export Dataset, i.e.:
```
{
"text": "Google was founded on September 4, 1998, by Larry Page and Sergey Brin.",
"entities": [
{
"id": 0,
"start_offset": 0,
"end_offset": 6,
"label": "ORG"
},
{
"id": 1,
"start_offset": 22,
"end_offset": 39,
"label": "DATE"
},
{
"id": 2,
"start_offset": 44,
"end_offset": 54,
"label": "PERSON"
},
{
"id": 3,
"start_offset": 59,
"end_offset": 70,
"label": "PERSON"
}
],
"relations": [
{
"id": 0,
"from_id": 0,
"to_id": 1,
"type": "foundedAt"
},
{
"id": 1,
"from_id": 0,
"to_id": 2,
"type": "foundedBy"
},
{
"id": 2,
"from_id": 0,
"to_id": 3,
"type": "foundedBy"
}
]
}
```
The second goal would be the ability to upload span labels and relation labels. Basically, Import Dataset should work with the Export Dataset -> JSONL(relation) results. I'll include a JSONL testing file for imports.
[relation_import_sample.zip](https://github.com/doccano/doccano/files/9913661/relation_import_sample.zip)
| Related to #2022 | 2022-12-16T07:02:12 |
doccano/doccano | 2,201 | doccano__doccano-2201 | [
"1883"
] | 5261fa944295b5fc4d3625a89af0e94febbd2bb6 | diff --git a/backend/examples/views/comment.py b/backend/examples/views/comment.py
--- a/backend/examples/views/comment.py
+++ b/backend/examples/views/comment.py
@@ -12,9 +12,10 @@
class CommentList(generics.ListCreateAPIView):
permission_classes = [IsAuthenticated & IsProjectMember]
serializer_class = CommentSerializer
- filter_backends = (DjangoFilterBackend, filters.SearchFilter)
+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
filterset_fields = ["example"]
search_fields = ("text",)
+ ordering_fields = ("created_at", "example")
def get_queryset(self):
queryset = Comment.objects.filter(example__project_id=self.kwargs["project_id"])
| Include Example number in the Project Comments view
Feature description
---------
On the backend API and in the dataset annotation interface, the Comments are associated with specific Examples. But in the Project Comments view, the Example association is unclear: all the comments are grouped together.
Can the Project Comments view tab be improved to detail Examples, maybe even sort or group by Example?
Thanks!
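With the `OrderingFilter` added in the patch, comments can at least be sorted and grouped by example from the API. A hedged sketch; the host, project id, token, and the response field names are assumptions inferred from the filter definitions:

```python
import requests

BASE = "http://localhost:8000"                       # hypothetical host
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # hypothetical credentials

resp = requests.get(f"{BASE}/v1/projects/1/comments",
                    params={"ordering": "example"},  # or "created_at"
                    headers=HEADERS)
payload = resp.json()
comments = payload["results"] if isinstance(payload, dict) else payload
for comment in comments:
    print(comment["example"], comment["text"])
```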
| Would you write your environment? Thank you!
Using the Docker Compose setup, running on Mac OS X Catalina 10.15.7 | 2023-06-02T06:08:44 |
|
doccano/doccano | 2,204 | doccano__doccano-2204 | [
"1818"
] | f3e8c92abd9bc4ad9cb6ef67e65b38d7bede8b23 | diff --git a/backend/projects/models.py b/backend/projects/models.py
--- a/backend/projects/models.py
+++ b/backend/projects/models.py
@@ -1,4 +1,5 @@
import abc
+import uuid
from django.conf import settings
from django.contrib.auth.models import User
@@ -60,6 +61,54 @@ def add_admin(self):
def is_text_project(self) -> bool:
return False
+ def clone(self) -> "Project":
+ """Clone the project.
+ See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances
+
+ Returns:
+ The cloned project.
+ """
+ project = Project.objects.get(pk=self.pk)
+ project.pk = None
+ project.id = None
+ project._state.adding = True
+ project.save()
+
+ def bulk_clone(queryset: models.QuerySet, field_initializers: dict = None):
+ """Clone the queryset.
+
+ Args:
+ queryset: The queryset to clone.
+ field_initializers: The field initializers.
+ """
+ if field_initializers is None:
+ field_initializers = {}
+ items = []
+ for item in queryset:
+ item.id = None
+ item.pk = None
+ for field, value_or_callable in field_initializers.items():
+ if callable(value_or_callable):
+ value_or_callable = value_or_callable()
+ setattr(item, field, value_or_callable)
+ item.project = project
+ item._state.adding = True
+ items.append(item)
+ queryset.model.objects.bulk_create(items)
+
+ bulk_clone(self.role_mappings.all())
+ bulk_clone(self.tags.all())
+
+ # clone examples
+ bulk_clone(self.examples.all(), field_initializers={"uuid": uuid.uuid4})
+
+ # clone label types
+ bulk_clone(self.categorytype_set.all())
+ bulk_clone(self.spantype_set.all())
+ bulk_clone(self.relationtype_set.all())
+
+ return project
+
def __str__(self):
return self.name
diff --git a/backend/projects/urls.py b/backend/projects/urls.py
--- a/backend/projects/urls.py
+++ b/backend/projects/urls.py
@@ -1,7 +1,7 @@
from django.urls import path
from .views.member import MemberDetail, MemberList, MyRole
-from .views.project import ProjectDetail, ProjectList
+from .views.project import CloneProject, ProjectDetail, ProjectList
from .views.tag import TagDetail, TagList
urlpatterns = [
@@ -11,5 +11,6 @@
path(route="projects/<int:project_id>/tags", view=TagList.as_view(), name="tag_list"),
path(route="projects/<int:project_id>/tags/<int:tag_id>", view=TagDetail.as_view(), name="tag_detail"),
path(route="projects/<int:project_id>/members", view=MemberList.as_view(), name="member_list"),
+ path(route="projects/<int:project_id>/clone", view=CloneProject.as_view(), name="clone_project"),
path(route="projects/<int:project_id>/members/<int:member_id>", view=MemberDetail.as_view(), name="member_detail"),
]
diff --git a/backend/projects/views/project.py b/backend/projects/views/project.py
--- a/backend/projects/views/project.py
+++ b/backend/projects/views/project.py
@@ -1,6 +1,8 @@
from django.conf import settings
+from django.db import transaction
+from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
-from rest_framework import filters, generics, status
+from rest_framework import filters, generics, status, views
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
@@ -52,3 +54,14 @@ class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
serializer_class = ProjectPolymorphicSerializer
lookup_url_kwarg = "project_id"
permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
+
+
+class CloneProject(views.APIView):
+ permission_classes = [IsAuthenticated & IsProjectAdmin]
+
+ @transaction.atomic
+ def post(self, request, *args, **kwargs):
+ project = get_object_or_404(Project, pk=self.kwargs["project_id"])
+ cloned_project = project.clone()
+ serializer = ProjectPolymorphicSerializer(cloned_project)
+ return Response(serializer.data, status=status.HTTP_201_CREATED)
| diff --git a/backend/projects/tests/test_project.py b/backend/projects/tests/test_project.py
--- a/backend/projects/tests/test_project.py
+++ b/backend/projects/tests/test_project.py
@@ -1,9 +1,12 @@
from django.conf import settings
+from django.test import TestCase
from rest_framework import status
from rest_framework.reverse import reverse
from api.tests.utils import CRUDMixin
-from projects.models import Member
+from examples.tests.utils import make_doc
+from label_types.tests.utils import make_label
+from projects.models import DOCUMENT_CLASSIFICATION, Member, Project
from projects.tests.utils import prepare_project
from roles.tests.utils import create_default_roles
from users.tests.utils import make_user
@@ -124,3 +127,46 @@ def test_denies_project_staff_to_delete_project(self):
def test_denies_non_member_to_delete_project(self):
self.assert_delete(self.non_member, status.HTTP_403_FORBIDDEN)
+
+
+class TestProjectModel(TestCase):
+ def setUp(self):
+ self.project = prepare_project().item
+
+ def test_clone_project(self):
+ project = self.project.clone()
+ self.assertNotEqual(project.id, self.project.id)
+ self.assertEqual(project.name, self.project.name)
+ self.assertEqual(project.role_mappings.count(), self.project.role_mappings.count())
+
+
+class TestCloneProject(CRUDMixin):
+ task = DOCUMENT_CLASSIFICATION
+ view_name = "annotation_list"
+
+ @classmethod
+ def setUpTestData(cls):
+ project = prepare_project(task=DOCUMENT_CLASSIFICATION)
+ cls.project = project.item
+ cls.user = project.admin
+ make_doc(cls.project)
+ cls.category_type = make_label(cls.project)
+ cls.url = reverse(viewname="clone_project", args=[cls.project.id])
+
+ def test_clone_project(self):
+ response = self.assert_create(self.user, status.HTTP_201_CREATED)
+
+ project = Project.objects.get(id=response.data["id"])
+
+ # assert project
+ self.assertNotEqual(project.id, self.project.id)
+ self.assertEqual(project.name, self.project.name)
+
+ # assert category type
+ category_type = project.categorytype_set.first()
+ self.assertEqual(category_type.text, self.category_type.text)
+
+ # assert example
+ example = self.project.examples.first()
+ cloned_example = project.examples.first()
+ self.assertEqual(example.text, cloned_example.text)
| Add ability to clone a project
Feature description
---------
Add a button on the Projects page that allows the user to clone an existing project and specify which sections to duplicate (Guidelines, Labels, Data, etc) into a new project.
Use Case
----------
Often, I want to do a test run of annotations to check the efficacy of my class labels and instructions before annotating a larger dataset.
There are times when I need to perform multiple test runs after changing instructions, class labels, or both. This is a very manual workflow of creating multiple, near duplicate projects. Having the ability to duplicate or clone an existing project, and then make edits manually would be an improvement. Currently, the only option in Doccano is to create a new project from scratch and manually copy/paste the Guidelines and re-upload a labels file (or manually enter in the labels a second time).
Env: MacOS
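The patch exposes cloning as `POST /v1/projects/<project_id>/clone`, restricted to project admins. A hedged client-side sketch with placeholder host, project id, and token:

```python
import requests

BASE = "http://localhost:8000"                       # hypothetical host
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # hypothetical credentials

resp = requests.post(f"{BASE}/v1/projects/42/clone", headers=HEADERS)
assert resp.status_code == 201                       # HTTP_201_CREATED per the view
cloned = resp.json()
print(cloned["id"], cloned["name"])                  # new id, same name as the source
```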
| Would you write your environment? Thank you!
Thanks for the use case. I understood why you need the feature.
Memo: Need to clone the following information at the same time:
- Project
- Label Types
- Guideline (included in the project)
I can work on this issue | 2023-06-06T05:05:21 |
doccano/doccano | 2,228 | doccano__doccano-2228 | [
"1941"
] | 67b356dcaa9873fcc0795bf74a451a0c79ab4c17 | diff --git a/backend/examples/filters.py b/backend/examples/filters.py
--- a/backend/examples/filters.py
+++ b/backend/examples/filters.py
@@ -1,11 +1,12 @@
-from django.db.models import Count, Q
-from django_filters.rest_framework import BooleanFilter, FilterSet
+from django.db.models import Count, Q, QuerySet
+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet
from .models import Example
class ExampleFilter(FilterSet):
confirmed = BooleanFilter(field_name="states", method="filter_by_state")
+ label = CharFilter(method="filter_by_label")
def filter_by_state(self, queryset, field_name, is_confirmed: bool):
queryset = queryset.annotate(
@@ -21,6 +22,35 @@ def filter_by_state(self, queryset, field_name, is_confirmed: bool):
queryset = queryset.filter(num_confirm__lte=0)
return queryset
+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:
+ """Filter examples by a given label name.
+
+ This performs filtering on all of the following labels at once:
+ - categories
+ - spans
+ - relations
+ - bboxes
+ - segmentations
+
+ Todo: Consider project type to make filtering more efficient.
+
+ Args:
+ queryset (QuerySet): QuerySet to filter.
+ field_name (str): This equals to `label`.
+ label (str): The label name to filter.
+
+ Returns:
+ QuerySet: Filtered examples.
+ """
+ queryset = queryset.filter(
+ Q(categories__label__text=label)
+ | Q(spans__label__text=label)
+ | Q(relations__type__text=label)
+ | Q(bboxes__label__text=label)
+ | Q(segmentations__label__text=label)
+ )
+ return queryset
+
class Meta:
model = Example
- fields = ("project", "text", "created_at", "updated_at")
+ fields = ("project", "text", "created_at", "updated_at", "label")
| diff --git a/backend/examples/tests/test_filters.py b/backend/examples/tests/test_filters.py
--- a/backend/examples/tests/test_filters.py
+++ b/backend/examples/tests/test_filters.py
@@ -1,10 +1,12 @@
from unittest.mock import MagicMock
from django.test import TestCase
+from model_mommy import mommy
from .utils import make_doc, make_example_state
from examples.filters import ExampleFilter
from examples.models import Example
+from projects.models import ProjectType
from projects.tests.utils import prepare_project
@@ -48,6 +50,17 @@ def test_returns_example_if_user_is_different_and_confirmed_is_empty(self):
self.assert_filter(data={"confirmed": ""}, expected=1)
+class TestLabelFilter(TestFilterMixin):
+ def setUp(self):
+ self.project = prepare_project(task=ProjectType.DOCUMENT_CLASSIFICATION)
+ self.prepare(project=self.project)
+ self.label_type = mommy.make("CategoryType", project=self.project.item, text="positive")
+ mommy.make("Category", example=self.example, label=self.label_type)
+
+ def test_returns_example_with_positive_label(self):
+ self.assert_filter(data={"label": self.label_type.text}, expected=1)
+
+
class TestExampleFilterOnCollaborative(TestFilterMixin):
def setUp(self):
self.project = prepare_project(task="DocumentClassification", collaborative_annotation=True)
| Add a function to filter labels
When I recheck the labels of the annotated data, I have no way of filtering for the labels I want to see. For example, when checking dichotomous annotations, I would like to filter the dataset to find out which examples are labeled positive and which negative, so that I can save time on the check. However, due to the lack of this function, I have to go through the dataset one example at a time, which wastes a lot of time and manpower.
Thanks to every contributor!
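For reference, the `label` filter added in the patch reduces this to a single query parameter. A hedged sketch; the host, project id, token, and examples route are assumptions:

```python
import requests

BASE = "http://localhost:8000"                       # hypothetical host
HEADERS = {"Authorization": "Token YOUR_API_TOKEN"}  # hypothetical credentials

# Matches examples carrying "positive" as a category, span, relation,
# bbox, or segmentation label, per the Q-object filter above.
resp = requests.get(f"{BASE}/v1/projects/1/examples",
                    params={"label": "positive"}, headers=HEADERS)
payload = resp.json()
examples = payload["results"] if isinstance(payload, dict) else payload
print(len(examples))
```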
| I have the same wish.
I also have great interest in this. Such functionality is desirable over the existing done/undone filtering.
Would it be possible as part of this feature to also filter annotations that have a label marked vs. annotations that haven't been marked with a label?
c.c: @Hironsan | 2023-06-29T01:58:44 |
doccano/doccano | 2,246 | doccano__doccano-2246 | [
"2245"
] | c0093f0c06db498d604b32e33704b13f4c45f820 | diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -170,7 +170,7 @@ def main():
# Dispatch handler.
args = parser.parse_args()
- if hasattr(args, "env_file") and Path(args.env_file).is_file():
+ if hasattr(args, "env_file") and args.env_file and Path(args.env_file).is_file():
env.read_env(args.env_file, recurse=False, override=True)
if hasattr(args, "handler"):
django.setup()
diff --git a/backend/data_import/pipeline/label.py b/backend/data_import/pipeline/label.py
--- a/backend/data_import/pipeline/label.py
+++ b/backend/data_import/pipeline/label.py
@@ -2,7 +2,7 @@
import uuid
from typing import Any, Optional
-from pydantic import UUID4, BaseModel, ConstrainedStr, NonNegativeInt, root_validator
+from pydantic import UUID4, BaseModel, NonNegativeInt, constr, root_validator
from .label_types import LabelTypes
from examples.models import Example
@@ -15,10 +15,6 @@
from projects.models import Project
-class NonEmptyStr(ConstrainedStr):
- min_length = 1
-
-
class Label(BaseModel, abc.ABC):
id: int = -1
uuid: UUID4
@@ -49,14 +45,14 @@ def __hash__(self):
class CategoryLabel(Label):
- label: NonEmptyStr
+ label: constr(min_length=1) # type: ignore
def __lt__(self, other):
return self.label < other.label
@classmethod
def parse(cls, example_uuid: UUID4, obj: Any):
- return cls(example_uuid=example_uuid, label=obj)
+ return cls(example_uuid=example_uuid, label=obj) # type: ignore
def create_type(self, project: Project) -> Optional[LabelType]:
return CategoryType(text=self.label, project=project)
@@ -66,14 +62,14 @@ def create(self, user, example: Example, types: LabelTypes, **kwargs):
class SpanLabel(Label):
- label: NonEmptyStr
+ label: constr(min_length=1) # type: ignore
start_offset: NonNegativeInt
end_offset: NonNegativeInt
def __lt__(self, other):
return self.start_offset < other.start_offset
- @root_validator
+ @root_validator(skip_on_failure=True)
def check_start_offset_is_less_than_end_offset(cls, values):
start_offset, end_offset = values.get("start_offset"), values.get("end_offset")
if start_offset >= end_offset:
@@ -105,14 +101,14 @@ def create(self, user, example: Example, types: LabelTypes, **kwargs):
class TextLabel(Label):
- text: NonEmptyStr
+ text: constr(min_length=1) # type: ignore
def __lt__(self, other):
return self.text < other.text
@classmethod
def parse(cls, example_uuid: UUID4, obj: Any):
- return cls(example_uuid=example_uuid, text=obj)
+ return cls(example_uuid=example_uuid, text=obj) # type: ignore
def create_type(self, project: Project) -> Optional[LabelType]:
return None
@@ -124,7 +120,7 @@ def create(self, user, example: Example, types: LabelTypes, **kwargs):
class RelationLabel(Label):
from_id: int
to_id: int
- type: NonEmptyStr
+ type: constr(min_length=1) # type: ignore
def __lt__(self, other):
return self.from_id < other.from_id
diff --git a/backend/projects/models.py b/backend/projects/models.py
--- a/backend/projects/models.py
+++ b/backend/projects/models.py
@@ -1,5 +1,6 @@
import abc
import uuid
+from typing import Any, Dict, Optional
from django.conf import settings
from django.contrib.auth.models import User
@@ -66,7 +67,7 @@ def clone(self) -> "Project":
project._state.adding = True
project.save()
- def bulk_clone(queryset: models.QuerySet, field_initializers: dict = None):
+ def bulk_clone(queryset: models.QuerySet, field_initializers: Optional[Dict[Any, Any]] = None):
"""Clone the queryset.
Args:
| Issue with pydantic in local installation
How to reproduce the problem
---------
Following the steps for a local installation in the ["Install with pip" section](https://doccano.github.io/doccano/install_and_upgrade_doccano/#install-doccano) in a virtualenv, when running `doccano task` I get the error
```bash
pydantic.errors.PydanticImportError: `pydantic:ConstrainedStr` has been removed in V2
```
<details>
<summary>Click here for full error trace </summary>
<br>
```bash
(.doccanovenv) ➜ project: doccano task
/user/project/.doccanovenv/lib/python3.9/site-packages/pydantic/_internal/_config.py:261: UserWarning: Valid config keys have changed in V2:
* 'schema_extra' has been renamed to 'json_schema_extra'
warnings.warn(message, UserWarning)
[2023-07-19 12:35:54 +0000] [9403] [INFO] [django_drf_filepond.apps::ready::61] App init: no django-storages backend configured, using default (local) storage backend if set, otherwise you need to manage file storage independently of this app.
Starting task queue.
[2023-07-19 12:35:57 +0000] [9403] [ERROR] [celery.utils.dispatch.signal::send::280] Signal handler <bound method DjangoFixup.on_import_modules of <celery.fixups.django.DjangoFixup object at 0x10411df40>> raised: PydanticImportError('`pydantic:ConstrainedStr` has been removed in V2.')
Traceback (most recent call last):
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/utils/dispatch/signal.py", line 276, in send
response = receiver(signal=self, sender=sender, **named)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/fixups/django.py", line 97, in on_import_modules
self.worker_fixup.validate_models()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/fixups/django.py", line 137, in validate_models
run_checks()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/registry.py", line 88, in run_checks
new_errors = check(app_configs=app_configs, databases=databases)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/urls.py", line 14, in check_url_config
return check_resolver(resolver)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/urls.py", line 24, in check_resolver
return check_method()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 494, in check
for pattern in self.url_patterns:
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/utils/functional.py", line 57, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 715, in url_patterns
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/utils/functional.py", line 57, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 708, in urlconf_module
return import_module(self.urlconf_name)
File "/opt/homebrew/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/config/urls.py", line 59, in <module>
path("v1/", include("data_import.urls")),
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/conf.py", line 38, in include
urlconf_module = import_module(urlconf_module)
File "/opt/homebrew/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/urls.py", line 3, in <module>
from .views import DatasetCatalog, DatasetImportAPI
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/views.py", line 7, in <module>
from .celery_tasks import import_dataset
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/celery_tasks.py", line 11, in <module>
from .datasets import load_dataset
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/datasets.py", line 12, in <module>
from .pipeline.label import CategoryLabel, Label, RelationLabel, SpanLabel, TextLabel
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/pipeline/label.py", line 5, in <module>
from pydantic import UUID4, BaseModel, ConstrainedStr, NonNegativeInt, root_validator
File "/user/project/.doccanovenv/lib/python3.9/site-packages/pydantic/__init__.py", line 207, in __getattr__
return _getattr_migration(attr_name)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/pydantic/_migration.py", line 294, in wrapper
raise PydanticImportError(f'`{import_path}` has been removed in V2.')
pydantic.errors.PydanticImportError: `pydantic:ConstrainedStr` has been removed in V2.
For further information visit https://errors.pydantic.dev/2.0.3/u/import-error
Traceback (most recent call last):
File "/user/project/.doccanovenv/bin/doccano", line 8, in <module>
sys.exit(main())
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/cli.py", line 177, in main
args.handler(args)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/cli.py", line 110, in command_run_task_queue
app.worker_main(argv=argv)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/app/base.py", line 388, in worker_main
self.start(argv=argv)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/app/base.py", line 368, in start
celery.main(args=argv, standalone_mode=False)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/user/project/.doccanovenv/lib/python3.9/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/bin/base.py", line 134, in caller
return f(ctx, *args, **kwargs)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/bin/worker.py", line 348, in worker
worker = app.Worker(
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/worker/worker.py", line 93, in __init__
self.app.loader.init_worker()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/loaders/base.py", line 110, in init_worker
self.import_default_modules()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/loaders/base.py", line 104, in import_default_modules
raise response
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/utils/dispatch/signal.py", line 276, in send
response = receiver(signal=self, sender=sender, **named)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/fixups/django.py", line 97, in on_import_modules
self.worker_fixup.validate_models()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/celery/fixups/django.py", line 137, in validate_models
run_checks()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/registry.py", line 88, in run_checks
new_errors = check(app_configs=app_configs, databases=databases)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/urls.py", line 14, in check_url_config
return check_resolver(resolver)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/core/checks/urls.py", line 24, in check_resolver
return check_method()
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 494, in check
for pattern in self.url_patterns:
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/utils/functional.py", line 57, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 715, in url_patterns
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/utils/functional.py", line 57, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/resolvers.py", line 708, in urlconf_module
return import_module(self.urlconf_name)
File "/opt/homebrew/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/config/urls.py", line 59, in <module>
path("v1/", include("data_import.urls")),
File "/user/project/.doccanovenv/lib/python3.9/site-packages/django/urls/conf.py", line 38, in include
urlconf_module = import_module(urlconf_module)
File "/opt/homebrew/Cellar/[email protected]/3.9.16/Frameworks/Python.framework/Versions/3.9/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/urls.py", line 3, in <module>
from .views import DatasetCatalog, DatasetImportAPI
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/views.py", line 7, in <module>
from .celery_tasks import import_dataset
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/celery_tasks.py", line 11, in <module>
from .datasets import load_dataset
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/datasets.py", line 12, in <module>
from .pipeline.label import CategoryLabel, Label, RelationLabel, SpanLabel, TextLabel
File "/user/project/.doccanovenv/lib/python3.9/site-packages/backend/data_import/pipeline/label.py", line 5, in <module>
from pydantic import UUID4, BaseModel, ConstrainedStr, NonNegativeInt, root_validator
File "/user/project/.doccanovenv/lib/python3.9/site-packages/pydantic/__init__.py", line 207, in __getattr__
return _getattr_migration(attr_name)
File "/user/project/.doccanovenv/lib/python3.9/site-packages/pydantic/_migration.py", line 294, in wrapper
raise PydanticImportError(f'`{import_path}` has been removed in V2.')
pydantic.errors.PydanticImportError: `pydantic:ConstrainedStr` has been removed in V2.
```
</details>
Fix
---------
I managed to run doccano by running `pip install pydantic==1.9` after running `pip install doccano` in the steps from the docs.
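A sketch of that workaround plus a quick runtime check. Note the patch in this PR instead migrates the importer off the removed v1-only APIs (`constr` in place of `ConstrainedStr`, and `root_validator(skip_on_failure=True)`):

```python
# Shell workaround reported above, run inside the virtualenv:
#   pip install doccano "pydantic==1.9"
import pydantic

assert pydantic.VERSION.startswith("1."), "this doccano release expects pydantic v1 APIs"
from pydantic import ConstrainedStr  # the import that fails on pydantic v2
```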
Your Environment
---------
* Operating System: macOS 13.0.1
* Python Version Used: 3.9.16
* When you install doccano: 19Jul2023 10PM UTC
* How did you install doccano (Heroku button etc): [Install with pip with virtualenv](https://doccano.github.io/doccano/install_and_upgrade_doccano/#install-doccano) locally
| Thanks. I can reproduce the problem. | 2023-07-20T10:38:32 |