repo
stringclasses 856
values | pull_number
int64 3
127k
| instance_id
stringlengths 12
58
| issue_numbers
sequencelengths 1
5
| base_commit
stringlengths 40
40
| patch
stringlengths 67
1.54M
| test_patch
stringlengths 0
107M
| problem_statement
stringlengths 3
307k
| hints_text
stringlengths 0
908k
| created_at
timestamp[s] |
---|---|---|---|---|---|---|---|---|---|
Mailu/Mailu | 2,083 | Mailu__Mailu-2083 | [
"2081"
] | dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2 | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -27,12 +27,12 @@
}),
}
-def check_credentials(user, password, ip, protocol=None):
+def check_credentials(user, password, ip, protocol=None, auth_port=None):
if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
return False
is_ok = False
# webmails
- if len(password) == 64 and ip == app.config['WEBMAIL_ADDRESS']:
+ if len(password) == 64 and auth_port in ['10143', '10025']:
if user.verify_temp_token(password):
is_ok = True
# All tokens are 32 characters hex lowercase
@@ -100,7 +100,7 @@ def handle_authentication(headers):
app.logger.warn(f'Invalid user {user_email!r}: {exc}')
else:
ip = urllib.parse.unquote(headers["Client-Ip"])
- if check_credentials(user, password, ip, protocol):
+ if check_credentials(user, password, ip, protocol, headers["Auth-Port"]):
server, port = get_server(headers["Auth-Protocol"], True)
return {
"Auth-Status": "OK",
| [master] Typo in front port
In https://github.com/Mailu/Mailu/blob/dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2/core/nginx/conf/nginx.conf#L280 port number must be 10143, not 10043. Just cosmetical, will have no other effect.
| 2021-12-14T12:05:23 |
||
Mailu/Mailu | 2,103 | Mailu__Mailu-2103 | [
"2102"
] | ee5fc81b0770aa2cde233f07a09383cb26ed8e42 | diff --git a/core/admin/mailu/utils.py b/core/admin/mailu/utils.py
--- a/core/admin/mailu/utils.py
+++ b/core/admin/mailu/utils.py
@@ -300,7 +300,7 @@ class MailuSessionConfig:
# default size of session key parts
uid_bits = 64 # default if SESSION_KEY_BITS is not set in config
sid_bits = 128 # for now. must be multiple of 8!
- time_bits = 32 # for now. must be multiple of 8!
+ time_bits = 32 # for now. must be multiple of 8!
def __init__(self, app=None):
@@ -341,6 +341,9 @@ def gen_created(self, now=None):
def parse_key(self, key, app=None, now=None):
""" Split key into sid, uid and creation time. """
+ if app is None:
+ app = flask.current_app
+
if not (isinstance(key, bytes) and self._key_min <= len(key) <= self._key_max):
return None
@@ -357,7 +360,7 @@ def parse_key(self, key, app=None, now=None):
if now is None:
now = int(time.time())
created = int.from_bytes(created, byteorder='big')
- if not created <= now <= created + self.app.config['PERMANENT_SESSION_LIFETIME']:
+ if not created <= now <= created + app.config['PERMANENT_SESSION_LIFETIME']:
return None
return (uid, sid, crt)
@@ -422,8 +425,8 @@ def cleanup_sessions(app=None):
count = 0
for key in app.session_store.list():
- if key.startswith('token-'):
- if sessid := app.session_store.get(token):
+ if key.startswith(b'token-'):
+ if sessid := app.session_store.get(key):
if not app.session_config.parse_key(sessid, app, now=now):
app.session_store.delete(sessid)
app.session_store.delete(key)
@@ -451,7 +454,7 @@ def prune_sessions(uid=None, keep=None, app=None):
count = 0
for key in app.session_store.list(prefix):
- if key not in keep and not key.startswith('token-'):
+ if key not in keep and not key.startswith(b'token-'):
app.session_store.delete(key)
count += 1
| Admin Error 500: 'MailuSessionConfig' object has no attribute 'app'
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [ ] Check if no issue or pull-request for this already exists.
- [ ] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
master
## Description
Admin and Webmail show 500 Internal Server Error.
The login page can be restored by deleting cookies, but nothing beyond the login page works.
## Replication Steps
Access `/admin` (and log in)
## Expected behaviour
Works normally.
## Logs
```
mailu-admin-1 | [2021-12-22 16:16:26,927] ERROR in app: Exception on /admin [GET]
mailu-admin-1 | Traceback (most recent call last):
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 2072, in wsgi_app
mailu-admin-1 | ctx.push()
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/ctx.py", line 404, in push
mailu-admin-1 | self.session = session_interface.open_session(self.app, self.request)
mailu-admin-1 | File "/app/mailu/utils.py", line 379, in open_session
mailu-admin-1 | return MailuSession(request.cookies.get(app.config['SESSION_COOKIE_NAME'], None), app)
mailu-admin-1 | File "/app/mailu/utils.py", line 195, in __init__
mailu-admin-1 | if parsed := self.app.session_config.parse_key(key, self.app):
mailu-admin-1 | File "/app/mailu/utils.py", line 360, in parse_key
mailu-admin-1 | if not created <= now <= created + self.app.config['PERMANENT_SESSION_LIFETIME']:
mailu-admin-1 | AttributeError: 'MailuSessionConfig' object has no attribute 'app'
mailu-admin-1 | [2021-12-22 16:16:26,927] ERROR in app: Request finalizing failed with an error while handling an error
mailu-admin-1 | Traceback (most recent call last):
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 2072, in wsgi_app
mailu-admin-1 | ctx.push()
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/ctx.py", line 404, in push
mailu-admin-1 | self.session = session_interface.open_session(self.app, self.request)
mailu-admin-1 | File "/app/mailu/utils.py", line 379, in open_session
mailu-admin-1 | return MailuSession(request.cookies.get(app.config['SESSION_COOKIE_NAME'], None), app)
mailu-admin-1 | File "/app/mailu/utils.py", line 195, in __init__
mailu-admin-1 | if parsed := self.app.session_config.parse_key(key, self.app):
mailu-admin-1 | File "/app/mailu/utils.py", line 360, in parse_key
mailu-admin-1 | if not created <= now <= created + self.app.config['PERMANENT_SESSION_LIFETIME']:
mailu-admin-1 | AttributeError: 'MailuSessionConfig' object has no attribute 'app'
mailu-admin-1 |
mailu-admin-1 | During handling of the above exception, another exception occurred:
mailu-admin-1 |
mailu-admin-1 | Traceback (most recent call last):
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1540, in finalize_request
mailu-admin-1 | response = self.process_response(response)
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1885, in process_response
mailu-admin-1 | response = self.ensure_sync(func)(response)
mailu-admin-1 | File "/usr/lib/python3.9/site-packages/flask_login/login_manager.py", line 399, in _update_remember_cookie
mailu-admin-1 | if '_remember' not in session and \
mailu-admin-1 | TypeError: argument of type 'NoneType' is not iterable
```
| 2021-12-22T17:42:31 |
||
Mailu/Mailu | 2,111 | Mailu__Mailu-2111 | [
"1930"
] | 14177c3f98de3f9722a710d9473825f3ba2f3a35 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -27,7 +27,7 @@
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
# Statistics management
'INSTANCE_ID_PATH': '/data/instance',
- 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
+ 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
# Common configuration variables
'SECRET_KEY': 'changeMe',
'DOMAIN': 'mailu.io',
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -36,12 +36,10 @@
'github_user': 'mailu',
'github_repo': 'mailu',
'github_version': version,
- 'stable_version': '1.8',
+ 'stable_version': '1.9',
'versions': [
- ('1.5', '/1.5/'),
- ('1.6', '/1.6/'),
- ('1.7', '/1.7/'),
('1.8', '/1.8/'),
+ ('1.9', '/1.9/'),
('master', '/master/')
],
'conf_py_path': '/docs/'
| Discussion of 1.9 roadmap
As proposed in the last project meeting (#1582 (comment)), we are experimenting with a discussion driven model for determining the roadmap of the next release. This follows the existing practice of the succesful community run project Gitea (e.g go-gitea/gitea#14477).
During last meeting someone made the remark that with everything in the pipeline we already have sufficient work for a 1.9 release. I agree. We have this special situation due to the 1.8rc where we had a release candidate for over 7 months. This caused many PRs to be merged, but not included in 1.8. These are now waiting to be made available via a 1.9 release. This also caused a lot of already submitted PRs not be reviewed yet. We wanted to focus on getting 1.8 out of the door.
There are new PRs waiting for review for
- Updating the interface to AdminLTE3 #1800. This is not only a security update, but also adds a lot of functionality such as
- a language selector
- more modern look&feel of interface elements
- filtering and column ordering for tables.
- Multiple PRs for increasing the overall security of mailu #1922, #1916, #1902.
- For these PRs we (I?) will also introduce a brand new security documentation page where you can find all documentation on what security measures are in place, how it works and how it can be tweaked.
- updated polish translations #1751 and completely new Hebrew translation #1873.
We have already merged PRs on master for
- Completely new CLI configuration import/export which really exports/imports the complete mailu configuration database. With this it is possible to migrate the mailu config to another mailu instance. You can also use it to easily add new configuration (e.g. import users with specific settings) https://mailu.io/master/cli.html#config-export
- single sign on for webmail. Webmail uses the admin web interface for authenticating users
- various security related enhancements
- All other already merged PRs which were not included in 1.8. See all newsfragments for the details https://github.com/Mailu/Mailu/tree/master/towncrier/newsfragments
IMHO All those features together are sufficient to release a new version.
I suggest we review all open PRs and then check what issues really need to be included as well for 1.9 and include these on the roadmap. Examples are:
- I think a good example is the new SSO #1929. When you are redirected to the login page, it does not show that the login is for the webmail. You have the feeling that you are redirected to the wrong page. See the demo site for an example (https://test.mailu.io/webmail/). Of course this is not production worthy.
- Incorrect documentation
- E.g.the documentation on translations #1869. We have no weblate instance anymore for web-based translations. The documentation must be updated as it is incorrect now. You can only do translations manually with a tool such as poedit and send a PR for getting your new translation files merged .
- documentation on development environment for the admin interface #1577. The faulty documentation should be removed or directly updated with the correct steps.
For small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included.
What are your thoughts? Please share your feedback.
Regardless the above wall of text, feel free to mention any feature/issue you would like included in 1.9.
| To keep track of things, I'd like to try using a github project for 1.9. I created a 1.9 project (https://github.com/Mailu/Mailu/projects/8). By simply using it we can determine if it is preferred over using a milestone. We could still use both at the same time.
A milestone quickly shows what has not been finished yet.
However I like that a project also shows if an issue has already been picked up by someone (moved from column to-do, to column in-progress)
Do we want to have the rate-limit changes part of 1.9 or are we okay with having another release where we have no effective rate-limiting?
I guess that depends on your availability since you generously offered to provide this functionality. It would be really neat.
I think if we are honest, it will probably take 2-3 months before we release 1.9?
Since we are with only a couple of people everything goes slowly. I think 2-3 months is realistic.
If you currently do not feel like it, I'd say let's do it after 1.9.
| 2021-12-29T14:47:39 |
|
Mailu/Mailu | 2,116 | Mailu__Mailu-2116 | [
"2114"
] | 14177c3f98de3f9722a710d9473825f3ba2f3a35 | diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -11,6 +11,10 @@
def index():
return flask.redirect(flask.url_for('.user_settings'))
[email protected]('/ui/')
+def redirect_old_path():
+ return flask.redirect(flask.url_for('.index'), code=301)
+
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
| Error 404 not found when opening admin after upgrade 1.8 to master
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
### Versions
Before upgrade: Docker 1.8 images.
After upgrade: Docker master images (pulled 30 December 2021).
## Description
**Mailu 1.8** image redirects `/admin` to `/admin/ui`.
**Mailu master** image no longer redirects `/admin/ui` as the `ui` part in the URL has been removed according to [Tomcat 1929.enhacement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):
> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui
After the upgrade from `1.8` to `master` and visiting the admin page, the browser still uses the cached URL `/admin/ui` and results in 404 not found.
## Replication Steps
1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.
2. Make sure the Admin page works.
3. Remove docker containers (`docker-compose down`).
4. Recreate **all** containers at the same time using `mailu master Docker images`.
5. Open root mail domain. The browser uses the cached URL `admin/ui` and shows Error 404 not found.
Note: Tested with `TLS_FLAVOR=letsencrypt`, admin and roundcube and Firefox.
## Expected behaviour
Backwards compatibility after Mailu 1.8 upgrade without the need of removing browser caches.
## Front log
```
front_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] "GET /admin/ui/ HTTP/2.0" 404 198 "https://mail.mydomain.nl/sso/login" "Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0"
```
## Bugfix
Proposal is to redirect `/admin/ui` always to `/admin` to prevent browser caching problems after the upgrade.
| 2021-12-30T14:31:23 |
||
Mailu/Mailu | 2,130 | Mailu__Mailu-2130 | [
"2128"
] | 3453d12ccbc2624b45178ddc1332c4e650b2c9ce | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -31,6 +31,7 @@ def nginx_authentication():
for key, value in headers.items():
response.headers[key] = str(value)
is_valid_user = False
+ is_from_webmail = headers['Auth-Port'] in ['10143', '10025']
if response.headers.get("Auth-User-Exists"):
username = response.headers["Auth-User"]
if utils.limiter.should_rate_limit_user(username, client_ip):
@@ -47,7 +48,7 @@ def nginx_authentication():
utils.limiter.exempt_ip_from_ratelimits(client_ip)
elif is_valid_user:
utils.limiter.rate_limit_user(username, client_ip)
- else:
+ elif not is_from_webmail:
utils.limiter.rate_limit_ip(client_ip)
return response
diff --git a/core/admin/mailu/limiter.py b/core/admin/mailu/limiter.py
--- a/core/admin/mailu/limiter.py
+++ b/core/admin/mailu/limiter.py
@@ -53,11 +53,10 @@ def should_rate_limit_ip(self, ip):
return is_rate_limited
def rate_limit_ip(self, ip):
- if ip != app.config['WEBMAIL_ADDRESS']:
- limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
- client_network = utils.extract_network_from_ip(ip)
- if self.is_subject_to_rate_limits(ip):
- limiter.hit(client_network)
+ limiter = self.get_limiter(app.config["AUTH_RATELIMIT_IP"], 'auth-ip')
+ client_network = utils.extract_network_from_ip(ip)
+ if self.is_subject_to_rate_limits(ip):
+ limiter.hit(client_network)
def should_rate_limit_user(self, username, ip, device_cookie=None, device_cookie_name=None):
limiter = self.get_limiter(app.config["AUTH_RATELIMIT_USER"], 'auth-user')
| fix keyError WEBMAIL_ADDRESS
## What type of PR?
bugfix WEBMAIL_ADDRESS not initialized in admin/mailu/configuration.py, leading to lot of errors in log.
## What does this PR do?
Initialize 'WEBMAIL_ADDRESS' to None in the admin configuration
### Related issue(s)
- closes #2125
## Prerequisites
None
| Thanks for submitting this pull request.
Bors-ng will now build test images. When it succeeds, we will continue to review and test your PR.
bors try
Note: if this build fails, [read this](http://mailu.io/master/contributors/environment.html#when-bors-try-fails).
## try
Build succeeded!
*And happy new year! π*
* [CI-Done](https://github.com/Mailu/Mailu/runs/4684075607?check_suite_focus=true)
Thank you for your contribution but I do think that this is the wrong approach.
Let's remove WEBMAIL_ADDRESS from the limiter instead... and make the caller decide whether it's worth calling rate_limit_ip() or not.
https://github.com/Mailu/Mailu/blob/3453d12ccbc2624b45178ddc1332c4e650b2c9ce/core/admin/mailu/internal/views/auth.py#L50 should be replaced to ensure that we only call it if the port isn't one of the webmail's (see https://github.com/Mailu/Mailu/blob/3453d12ccbc2624b45178ddc1332c4e650b2c9ce/core/admin/mailu/internal/nginx.py#L35 on how-to)
Well, that's totally your call @nextgens ! I confess I didn't try to find the best solution but the easiest and fastest one... | 2022-01-03T12:53:15 |
|
Mailu/Mailu | 2,140 | Mailu__Mailu-2140 | [
"2138"
] | 6953ee6bdea79408d7b49b89e0dc5ad54c8f66db | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -276,7 +276,7 @@ def dns_tlsa(self):
hostname = app.config['HOSTNAME']
if app.config['TLS_FLAVOR'] in ('letsencrypt', 'mail-letsencrypt'):
# current ISRG Root X1 (RSA 4096, O = Internet Security Research Group, CN = ISRG Root X1) @20210902
- return f'_25._tcp.{hostname}. 600 IN TLSA 2 1 1 0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3'
+ return f'_25._tcp.{hostname}. 86400 IN TLSA 2 1 0 30820222300d06092a864886f70d01010105000382020f003082020a0282020100ade82473f41437f39b9e2b57281c87bedcb7df38908c6e3ce657a078f775c2a2fef56a6ef6004f28dbde68866c4493b6b163fd14126bbf1fd2ea319b217ed1333cba48f5dd79dfb3b8ff12f1219a4bc18a8671694a66666c8f7e3c70bfad292206f3e4c0e680aee24b8fb7997e94039fd347977c99482353e838ae4f0a6f832ed149578c8074b6da2fd0388d7b0370211b75f2303cfa8faeddda63abeb164fc28e114b7ecf0be8ffb5772ef4b27b4ae04c12250c708d0329a0e15324ec13d9ee19bf10b34a8c3f89a36151deac870794f46371ec2ee26f5b9881e1895c34796c76ef3b906279e6dba49a2f26c5d010e10eded9108e16fbb7f7a8f7c7e50207988f360895e7e237960d36759efb0e72b11d9bbc03f94905d881dd05b42ad641e9ac0176950a0fd8dfd5bd121f352f28176cd298c1a80964776e4737baceac595e689d7f72d689c50641293e593edd26f524c911a75aa34c401f46a199b5a73a516e863b9e7d72a712057859ed3e5178150b038f8dd02f05b23e7b4a1c4b730512fcc6eae050137c439374b3ca74e78e1f0108d030d45b7136b407bac130305c48b7823b98a67d608aa2a32982ccbabd83041ba2830341a1d605f11bc2b6f0a87c863b46a8482a88dc769a76bf1f6aa53d198feb38f364dec82b0d0a28fff7dbe21542d422d0275de179fe18e77088ad4ee6d98b3ac6dd27516effbc64f533434f0203010001'
@property
def dkim_key(self):
| Root CA is not included in certificate chain for DANE validation
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
I just upgraded to 1.9 and am trying to integrate DANE validation. I got the following DANE Survey Notice:
"Your TLSA record designates a root CA hash, but, as is common, the root CA is not included in your certificate chain. It would need to be included to work with DANE-TA(2), but simpler to use an intermediate CA hash instead."
## Replication Steps
I set a DNS TLSA record as recommended, with 2 1 1 designation. I have DNSSEC activated for the relevant domain. Certificates are issued through Let's Encrypt.
## Expected behaviour
Full validation of the certificate chain.
## Logs
| Doh. Good catch | 2022-01-05T09:39:15 |
|
Mailu/Mailu | 2,144 | Mailu__Mailu-2144 | [
"2135"
] | 94bbd25fe8d8824bf1a56f20dabfbc0e123a055b | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -18,6 +18,34 @@
log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
+def test_DNS():
+ import dns.resolver
+ import dns.exception
+ import dns.flags
+ import dns.rdtypes
+ import dns.rdatatype
+ import dns.rdataclass
+ import time
+ # DNS stub configured to do DNSSEC enabled queries
+ resolver = dns.resolver.Resolver()
+ resolver.use_edns(0, 0, 1232)
+ resolver.flags = dns.flags.AD | dns.flags.RD
+ nameservers = resolver.nameservers
+ for ns in nameservers:
+ resolver.nameservers=[ns]
+ while True:
+ try:
+ result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
+ except Exception as e:
+ log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
+ else:
+ if result.response.flags & dns.flags.AD:
+ break
+ log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.", ns)
+ time.sleep(5)
+
+test_DNS()
+
start_command="".join([
"gunicorn --threads ", str(os.cpu_count()),
" -b :80 ",
| diff --git a/tests/compose/core/docker-compose.yml b/tests/compose/core/docker-compose.yml
--- a/tests/compose/core/docker-compose.yml
+++ b/tests/compose/core/docker-compose.yml
@@ -40,8 +40,11 @@ services:
volumes:
- "/mailu/data:/data"
- "/mailu/dkim:/dkim"
+ dns:
+ - 192.168.203.254
depends_on:
- redis
+ - resolver
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -75,7 +78,13 @@ services:
# Optional services
-
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
# Webmail
diff --git a/tests/compose/fetchmail/docker-compose.yml b/tests/compose/fetchmail/docker-compose.yml
--- a/tests/compose/fetchmail/docker-compose.yml
+++ b/tests/compose/fetchmail/docker-compose.yml
@@ -42,6 +42,9 @@ services:
- "/mailu/dkim:/dkim"
depends_on:
- redis
+ - resolver
+ dns:
+ - 192.168.203.254
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -81,6 +84,15 @@ services:
restart: always
env_file: mailu.env
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
+
+
# Webmail
diff --git a/tests/compose/filters/docker-compose.yml b/tests/compose/filters/docker-compose.yml
--- a/tests/compose/filters/docker-compose.yml
+++ b/tests/compose/filters/docker-compose.yml
@@ -40,8 +40,11 @@ services:
volumes:
- "/mailu/data:/data"
- "/mailu/dkim:/dkim"
+ dns:
+ - 192.168.203.254
depends_on:
- redis
+ - resolver
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -81,7 +84,13 @@ services:
volumes:
- "/mailu/filter:/data"
-
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
# Webmail
diff --git a/tests/compose/rainloop/docker-compose.yml b/tests/compose/rainloop/docker-compose.yml
--- a/tests/compose/rainloop/docker-compose.yml
+++ b/tests/compose/rainloop/docker-compose.yml
@@ -42,6 +42,9 @@ services:
- "/mailu/dkim:/dkim"
depends_on:
- redis
+ - resolver
+ dns:
+ - 192.168.203.254
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -75,7 +78,13 @@ services:
# Optional services
-
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
# Webmail
webmail:
diff --git a/tests/compose/roundcube/docker-compose.yml b/tests/compose/roundcube/docker-compose.yml
--- a/tests/compose/roundcube/docker-compose.yml
+++ b/tests/compose/roundcube/docker-compose.yml
@@ -42,6 +42,9 @@ services:
- "/mailu/dkim:/dkim"
depends_on:
- redis
+ - resolver
+ dns:
+ - 192.168.203.254
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -75,7 +78,13 @@ services:
# Optional services
-
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
# Webmail
webmail:
diff --git a/tests/compose/webdav/docker-compose.yml b/tests/compose/webdav/docker-compose.yml
--- a/tests/compose/webdav/docker-compose.yml
+++ b/tests/compose/webdav/docker-compose.yml
@@ -40,8 +40,11 @@ services:
volumes:
- "/mailu/data:/data"
- "/mailu/dkim:/dkim"
+ dns:
+ - 192.168.203.254
depends_on:
- redis
+ - resolver
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${PINNED_MAILU_VERSION:-local}
@@ -82,6 +85,13 @@ services:
volumes:
- "/mailu/dav:/data"
+ resolver:
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
+ env_file: mailu.env
+ restart: always
+ networks:
+ default:
+ ipv4_address: 192.168.203.254
# Webmail
| Cannot send email after upgrade to 1.9 `non DNSSEC destination`
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
Thanks for all the work! 1.9 seems like an amazing release overall β€οΈ
I upgraded to 1.9, generated the new docker-compose file with the tool.
Copied the relevant env variables over and started everything.
I can still receive mails, and send internal one (from and to my own domain)
I cannot send mails to external providers.
after restoring from backup everything works in 1.8
```
Final-Recipient: rfc822; ******
Original-Recipient: rfc822;******
Action: delayed
Status: 4.7.5
Diagnostic-Code: X-Postfix; non DNSSEC destination
Will-Retry-Until: Fri, 7 Jan 2022 18:38:34 +0000 (UTC)
Return-Path: <*****>
From: Cloud <*******>
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=nicco.io; s=dkim;
t=1641148715;
h=from:from:reply-to:subject:subject:date:date:message-id:message-id:
to:to:cc:mime-version:mime-version:content-type:content-type:
in-reply-to:in-reply-to:references:references;
bh=Q0j3Ph9l8nLeBMIzdq6aOtNcsZOyiD8WuiQSGgc2oeY=;
b=nF+9OZeRSSEDZ995inbj/6nDYgbTDMhPGc63Gab3FN1A984PxY7rDoHIhkO5nvh2wzNZG6
5jIAmfCyYfHriJawNrtcKeUA4wBO5YgYPWag6CbGmWQ8sGSIty2fjIO0W4dWfWy+OjsksX
sJ2TK8uft8Ax9F9QmQKMuZHBP3Myh/I=
Content-Type: multipart/mixed;
boundary="Apple-Mail=_DD0DBE9A-59E8-455E-B4DD-1317DF1D24F9"
Mime-Version: 1.0
Subject: =*****
Date: Sun, 2 Jan 2022 19:38:33 +0100
References: <****>
<****>
<****>
<****>
To: ***
In-Reply-To: ****
Message-Id: ****
X-Spam-Level: *
X-Spamd-Bar: +
Authentication-Results: *****;
auth=pass smtp.mailfrom=*****
```
| Is your local DNS resolver supporting DNSSEC? If not can you retry with unbound configured (it's an option in setup)?
Is it failing for all outgoing destinations or just one? If it's one, would you mind sharing the target FQDN?
How can I check? I can try unbound, don't have it in the config rn.
Yes, gmail delivery fails and some other providers too.
Thanks for the quick reply!
I am having the same issue:
```
2022-01-04T14:28:48.456222-06:00 cca953dfc6f8 postfix/smtp[428]: warning: TLS policy lookup for gmail.com/gmail-smtp-in.l.google.com: non DNSSEC destination
2022-01-04T14:28:48.456465-06:00 cca953dfc6f8 postfix/smtp[428]: warning: TLS policy lookup for gmail.com/gmail-smtp-in.l.google.com: non DNSSEC destination
2022-01-04T14:28:48.519750-06:00 cca953dfc6f8 postfix/smtp[428]: warning: TLS policy lookup for gmail.com/alt1.gmail-smtp-in.l.google.com: non DNSSEC destination
2022-01-04T14:28:48.519937-06:00 cca953dfc6f8 postfix/smtp[428]: warning: TLS policy lookup for gmail.com/alt1.gmail-smtp-in.l.google.com: non DNSSEC destination
2022-01-04T14:28:48.573257-06:00 cca953dfc6f8 postfix/smtp[428]: warning: TLS policy lookup for gmail.com/alt2.gmail-smtp-in.l.google.com: non DNSSEC destination
2022-01-04T14:28:48.587170-06:00 cca953dfc6f8 postfix/smtp[428]: B66B71C4140: to=<[email protected]>, relay=none, delay=0.94, delays=0.24/0.07/0.63/0, dsn=4.7.5, status=deferred (non DNSSEC destination)
```
Setting `DEFER_ON_TLS_ERROR` in the env seems to have fixed this for now. Although it looks like it "magically" validated DNSSEC so maybe when I added that to the env file and redeployed it fixed something too?
I suspect that this is down to not having access to a DNSSEC validating resolver.
To fix it you need to either:
- enable DNSSEC validation on your current DNS resolver
- enable unbound (so that Mailu uses its own resolver)
- switch to a different DNS resolver (keep in mind that public DNS resolvers such as 8.8.8.8 are usually unsuitable to use with Mailu as they are rate limited on RBLs)
@cupcakearmy would you mind retrying 1.9 with unbound enabled please?
I added
```
resolver:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-1.9}
env_file: .env
restart: unless-stopped
networks:
default:
ipv4_address: 192.168.203.254
```
To my compose file and the depends_on in the smtp and antivirus but no luck. I checked the generated config but there seems nothing to be configured there? Am I doing something wrong?
You should have the following for all containers except unbound in your docker-compose.yml:
```
depends_on:
- resolver
dns:
- <ip of unbound>
```
And yes, there's nothing to configure | 2022-01-05T17:57:52 |
Mailu/Mailu | 2,150 | Mailu__Mailu-2150 | [
"2146"
] | 94bbd25fe8d8824bf1a56f20dabfbc0e123a055b | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -18,7 +18,8 @@ def nginx_authentication():
response.headers['Auth-Error-Code'] = '502 5.5.1'
utils.limiter.rate_limit_ip(client_ip)
return response
- if utils.limiter.should_rate_limit_ip(client_ip):
+ is_from_webmail = headers['Auth-Port'] in ['10143', '10025']
+ if not is_from_webmail and utils.limiter.should_rate_limit_ip(client_ip):
status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
response = flask.Response()
response.headers['Auth-Status'] = status
@@ -31,7 +32,6 @@ def nginx_authentication():
for key, value in headers.items():
response.headers[key] = str(value)
is_valid_user = False
- is_from_webmail = headers['Auth-Port'] in ['10143', '10025']
if response.headers.get("Auth-User-Exists"):
username = response.headers["Auth-User"]
if utils.limiter.should_rate_limit_user(username, client_ip):
| fix key error in dictionary
## What type of PR?
(Feature, enhancement, bug-fix, documentation)
## What does this PR do?
### Related issue(s)
- Auto close an issue like: closes #2145
## Prerequisites
Before we can consider review and merge, please make sure the following list is done and checked.
If an entry in not applicable, you can check it or remove it from the list.
- [ ] In case of feature or enhancement: documentation updated accordingly
- [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/workflow.html#changelog) entry file.
| Thanks for submitting this pull request.
Bors-ng will now build test images. When it succeeds, we will continue to review and test your PR.
bors try
Note: if this build fails, [read this](http://mailu.io/master/contributors/environment.html#when-bors-try-fails).
## try
Build failed:
* [CI-Done](https://github.com/Mailu/Mailu/runs/4729305720?check_suite_focus=true) | 2022-01-07T08:11:27 |
|
Mailu/Mailu | 2,157 | Mailu__Mailu-2157 | [
"2154"
] | 896e7fb54b71c1954b8078811664a256471f946c | diff --git a/core/admin/mailu/__init__.py b/core/admin/mailu/__init__.py
--- a/core/admin/mailu/__init__.py
+++ b/core/admin/mailu/__init__.py
@@ -57,6 +57,7 @@ def inject_defaults():
return dict(
signup_domains= signup_domains,
config = app.config,
+ get_locale = utils.get_locale,
)
# Jinja filters
| Admin User Quota sorting is off
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [ x] Check if no issue or pull-request for this already exists.
- [ x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [ x] You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- [ x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [ x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
When sorting by quota in the Admin interface the numbers are sorted like text instead of by number and bytes.
## Expected behaviour
kB is smaller than MB is smaller than GB

| 2022-01-11T15:52:35 |
||
Mailu/Mailu | 2,158 | Mailu__Mailu-2158 | [
"2155"
] | e3e370018791e17d647e014c6543e5b6ee79a23f | diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -30,10 +30,10 @@ def login():
username = form.email.data
if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):
flask.flash('Too many attempts from your IP (rate-limit)', 'error')
- return flask.render_template('login.html', form=form)
+ return flask.render_template('login.html', form=form, fields=fields)
if utils.limiter.should_rate_limit_user(username, client_ip, device_cookie, device_cookie_username):
flask.flash('Too many attempts for this user (rate-limit)', 'error')
- return flask.render_template('login.html', form=form)
+ return flask.render_template('login.html', form=form, fields=fields)
user = models.User.login(username, form.pw.data)
if user:
flask.session.regenerate()
| Login attempt on roundcube triggers error 500 on /sso/login endpoint (ZeroDivisionError: division by zero)
Hi everybody!
Thanks in advance for your help!
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
A user encountered an error 500 while trying to log into an account on roundcube. The 500 comes from a ZeroDivisionError in one of the jinja templates (see logs below).
## Replication Steps
Unfortunately I could not reproduce it so far. Apparently it happened on the first login attempt, even though I suspect that it had to do with rate limiting, since there was a message about rate limiting right before the error (see below). Apparently the number of fields on the sso form is zero?
The user also reports, that it is now working (also when rapidly logging out and logging in again).
## Expected behaviour
No error 500 and succesful login.
## Logs
````markdown
```
admin_1 | [2022-01-10 14:59:49,322] WARNING in limiter: Authentication attempt from <REDACTED IP OF USER> for <REDACTED MAIL ACCOUNT> has been rate-limited.
admin_1 | [2022-01-10 14:59:49,334] ERROR in app: Exception on /sso/login [POST]
admin_1 | Traceback (most recent call last):
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 2073, in wsgi_app
admin_1 | response = self.full_dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1518, in full_dispatch_request
admin_1 | rv = self.handle_user_exception(e)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1516, in full_dispatch_request
admin_1 | rv = self.dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1502, in dispatch_request
admin_1 | return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
admin_1 | File "/app/mailu/sso/views/base.py", line 36, in login
admin_1 | return flask.render_template('login.html', form=form)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/templating.py", line 147, in render_template
admin_1 | return _render(
admin_1 | File "/usr/lib/python3.9/site-packages/flask/templating.py", line 128, in _render
admin_1 | rv = template.render(context)
admin_1 | File "/usr/lib/python3.9/site-packages/jinja2/environment.py", line 1304, in render
admin_1 | self.environment.handle_exception()
admin_1 | File "/usr/lib/python3.9/site-packages/jinja2/environment.py", line 925, in handle_exception
admin_1 | raise rewrite_traceback_stack(source=source)
admin_1 | File "/app/mailu/sso/templates/login.html", line 1, in top-level template code
admin_1 | {%- extends "form_sso.html" %}
admin_1 | File "/app/mailu/sso/templates/form_sso.html", line 1, in top-level template code
admin_1 | {%- extends "base_sso.html" %}
admin_1 | File "/app/mailu/sso/templates/base_sso.html", line 70, in top-level template code
admin_1 | {%- block content %}{%- endblock %}
admin_1 | File "/app/mailu/sso/templates/form_sso.html", line 4, in block 'content'
admin_1 | {%- call macros.card() %}
admin_1 | File "/usr/lib/python3.9/site-packages/jinja2/runtime.py", line 828, in _invoke
admin_1 | rv = self._func(*arguments)
admin_1 | File "/app/mailu/ui/templates/macros.html", line 84, in template
admin_1 | {{- caller() }}
admin_1 | File "/usr/lib/python3.9/site-packages/jinja2/runtime.py", line 828, in _invoke
admin_1 | rv = self._func(*arguments)
admin_1 | File "/app/mailu/sso/templates/form_sso.html", line 8, in template
admin_1 | {{ macros.form_fields(fields, label=False, class="btn btn-default") }}
admin_1 | File "/usr/lib/python3.9/site-packages/jinja2/runtime.py", line 828, in _invoke
admin_1 | rv = self._func(*arguments)
admin_1 | File "/app/mailu/ui/templates/macros.html", line 22, in template
admin_1 | {%- set width = (12 / fields|length)|int %}
admin_1 | ZeroDivisionError: division by zero
```
````
The stack trace indeed indicates the error comes from `{%- set width = (12 / fields|length)|int %}`
A possible replication scenario could maybe be:
1). Set a low value for AUTH_RATELIMIT_USER or AUTH_RATELIMIT_IP
2). Trigger the rate limiter by failing to login multiple times.
3). Check the logs if we see the division by zero error.
I think I see the problem in the stack trace and source code.
https://github.com/Mailu/Mailu/blob/e3e370018791e17d647e014c6543e5b6ee79a23f/core/admin/mailu/sso/views/base.py#L33
https://github.com/Mailu/Mailu/blob/e3e370018791e17d647e014c6543e5b6ee79a23f/core/admin/mailu/sso/views/base.py#L36
I think these render_template calls miss the fields variable. This causes the fields variable to be empty (0) and results in the division by zero error.
https://github.com/Mailu/Mailu/blob/e3e370018791e17d647e014c6543e5b6ee79a23f/core/admin/mailu/sso/views/base.py#L49 | 2022-01-11T17:54:37 |
|
Mailu/Mailu | 2,177 | Mailu__Mailu-2177 | [
"001"
] | c3af62ab033d706aca6eba57028e58de1a4669ba | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -35,7 +35,7 @@ def test_DNS():
resolver.nameservers=[ns]
while True:
try:
- result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
+ result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
except Exception as e:
log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
else:
| Manage user authentication and permissions
Currently no authentication is implemented. Multiple issues will have to be tackled:
- complete permission scheme or simple admin role plus admins per domain?
- how to store user passwords (shared format between Flask-admin and dovecot)?
- how should the initial use be created?
| Fixed by 4576e1f5c2bd8fbdd8c706f435c9e4718dbc384d.
| 2022-01-21T07:48:01 |
|
Mailu/Mailu | 2,207 | Mailu__Mailu-2207 | [
"2186"
] | 855f3b065ba44ff54e17fc0308b16ba928c0e569 | diff --git a/webmails/rainloop/config.py b/webmails/rainloop/config.py
new file mode 100755
--- /dev/null
+++ b/webmails/rainloop/config.py
@@ -0,0 +1,15 @@
+#!/usr/bin/python3
+
+import os
+import logging as log
+import sys
+from socrate import system, conf
+
+args = os.environ.copy()
+
+log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
+
+# Build final configuration paths
+conf.jinja("/config/nginx-rainloop.conf", args, "/etc/nginx/http.d/rainloop.conf")
+if os.path.exists("/var/run/nginx.pid"):
+ os.system("nginx -s reload")
diff --git a/webmails/rainloop/start.py b/webmails/rainloop/start.py
--- a/webmails/rainloop/start.py
+++ b/webmails/rainloop/start.py
@@ -4,6 +4,7 @@
import shutil
import logging as log
import sys
+import subprocess
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
@@ -22,8 +23,11 @@
conf.jinja("/defaults/default.ini", os.environ, "/data/_data_/_default_/domains/default.ini")
conf.jinja("/defaults/application.ini", os.environ, "/data/_data_/_default_/configs/application.ini")
conf.jinja("/defaults/php.ini", os.environ, "/etc/php7/php.ini")
+# Start the fastcgi process manager now that config files have been adjusted
+os.system("php-fpm7")
os.system("chown -R nginx:nginx /data")
os.system("chmod -R a+rX /var/www/rainloop/")
+subprocess.call(["/config.py"])
os.execv("/usr/sbin/nginx", ["nginx", "-g", "daemon off;"])
| max attachement size with rainloop is 1mb
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
cannot attach anything larger than 1,048,300 bytes to any email from rainloop
## Replication Steps
attach anything larger than 1,048,300 bytes to any email from rainloop and it will fail with HTTP 413
## Expected behaviour
i can attach any file as large as the MESSAGE_SIZE_LIMIT as defined in (mailu.env - 33%)
## Logs
browser log only: for the xhr POST
````markdown
```
413 Request Entity Too Large
```
````
| Good day please see the comment in this pool request: https://github.com/Mailu/Mailu/pull/2149#issuecomment-1021784532
This explains that something needs to be changed also in the webmail container | 2022-01-31T21:48:56 |
|
Mailu/Mailu | 2,210 | Mailu__Mailu-2210 | [
"1817"
] | 1e54d44b19cc2a646bd347b417fb9d8ae78b6be5 | diff --git a/core/admin/mailu/ui/views/domains.py b/core/admin/mailu/ui/views/domains.py
--- a/core/admin/mailu/ui/views/domains.py
+++ b/core/admin/mailu/ui/views/domains.py
@@ -2,6 +2,7 @@
from mailu.ui import ui, forms, access
from flask import current_app as app
+import validators
import flask
import flask_login
import wtforms_components
@@ -18,18 +19,21 @@ def domain_list():
def domain_create():
form = forms.DomainForm()
if form.validate_on_submit():
- conflicting_domain = models.Domain.query.get(form.name.data)
- conflicting_alternative = models.Alternative.query.get(form.name.data)
- conflicting_relay = models.Relay.query.get(form.name.data)
- if conflicting_domain or conflicting_alternative or conflicting_relay:
- flask.flash('Domain %s is already used' % form.name.data, 'error')
+ if validators.domain(form.name.data):
+ conflicting_domain = models.Domain.query.get(form.name.data)
+ conflicting_alternative = models.Alternative.query.get(form.name.data)
+ conflicting_relay = models.Relay.query.get(form.name.data)
+ if conflicting_domain or conflicting_alternative or conflicting_relay:
+ flask.flash('Domain %s is already used' % form.name.data, 'error')
+ else:
+ domain = models.Domain()
+ form.populate_obj(domain)
+ models.db.session.add(domain)
+ models.db.session.commit()
+ flask.flash('Domain %s created' % domain)
+ return flask.redirect(flask.url_for('.domain_list'))
else:
- domain = models.Domain()
- form.populate_obj(domain)
- models.db.session.add(domain)
- models.db.session.commit()
- flask.flash('Domain %s created' % domain)
- return flask.redirect(flask.url_for('.domain_list'))
+ flask.flash('Domain %s is invalid' % form.name.data, 'error')
return flask.render_template('domain/create.html', form=form)
| Admin interface errors with leading whitespace in domain name of new domain view
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.7
## Description
The "New Domain" dialog view (`/admin/ui/domain/create`) crashes when the "Domain Name" field contains leading whitespace
## Replication Steps
- Open the Admin Interface
- Navigate to "Mail Domains" using the Sidenav
- Click on "New Domain" in the top right corner
- Write a string with a leading space like "` test`" in the "Domain Name" field
- Click "Save"
- The page crashes
## Expected behaviour
The input field could self validate the input and indicate that its invalid or the input string for domains should get trimmed.
## Logs
````markdown
```
mail-admin | [2021-05-26 15:00:32,624] ERROR in app: Exception on /ui/domain/create [POST]
mail-admin | Traceback (most recent call last):
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1179, in _execute_context
mail-admin | context = constructor(dialect, self, conn, *args)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 745, in _init_compiled
mail-admin | for key in compiled_params
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 745, in <genexpr>
mail-admin | for key in compiled_params
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1201, in process
mail-admin | return process_param(value, dialect)
mail-admin | File "/app/mailu/models.py", line 30, in process_bind_param
mail-admin | return idna.encode(value).decode("ascii").lower()
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 358, in encode
mail-admin | s = alabel(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 270, in alabel
mail-admin | ulabel(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 304, in ulabel
mail-admin | check_label(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 261, in check_label
mail-admin | raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
mail-admin | idna.core.InvalidCodepoint: Codepoint U+0020 at position 1 of ' test ' not allowed
mail-admin |
mail-admin | The above exception was the direct cause of the following exception:
mail-admin |
mail-admin | Traceback (most recent call last):
mail-admin | File "/usr/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app
mail-admin | response = self.full_dispatch_request()
mail-admin | File "/usr/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request
mail-admin | rv = self.handle_user_exception(e)
mail-admin | File "/usr/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception
mail-admin | reraise(exc_type, exc_value, tb)
mail-admin | File "/usr/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise
mail-admin | raise value
mail-admin | File "/usr/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
mail-admin | rv = self.dispatch_request()
mail-admin | File "/usr/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
mail-admin | return self.view_functions[rule.endpoint](**req.view_args)
mail-admin | File "/usr/lib/python3.7/site-packages/flask_login/utils.py", line 261, in decorated_view
mail-admin | return func(*args, **kwargs)
mail-admin | File "/app/mailu/ui/access.py", line 36, in wrapper
mail-admin | return callback(function, args, kwargs, (), {})
mail-admin | File "/app/mailu/ui/access.py", line 19, in callback
mail-admin | return function(*args, **kwargs)
mail-admin | File "/app/mailu/ui/views/domains.py", line 22, in domain_create
mail-admin | conflicting_domain = models.Domain.query.get(form.name.data)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 959, in get
mail-admin | return self._get_impl(ident, loading.load_on_pk_identity)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 1069, in _get_impl
mail-admin | return db_load_fn(self, primary_key_identity)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 282, in load_on_pk_identity
mail-admin | return q.one()
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3275, in one
mail-admin | ret = self.one_or_none()
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3244, in one_or_none
mail-admin | ret = list(self)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3317, in __iter__
mail-admin | return self._execute_and_instances(context)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3342, in _execute_and_instances
mail-admin | result = conn.execute(querycontext.statement, self._params)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
mail-admin | return meth(self, multiparams, params)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 287, in _execute_on_connection
mail-admin | return connection._execute_clauseelement(self, multiparams, params)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1107, in _execute_clauseelement
mail-admin | distilled_params,
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1182, in _execute_context
mail-admin | e, util.text_type(statement), parameters, None, None
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1466, in _handle_dbapi_exception
mail-admin | util.raise_from_cause(sqlalchemy_exception, exc_info)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 383, in raise_from_cause
mail-admin | reraise(type(exception), exception, tb=exc_tb, cause=cause)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 128, in reraise
mail-admin | raise value.with_traceback(tb)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1179, in _execute_context
mail-admin | context = constructor(dialect, self, conn, *args)
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 745, in _init_compiled
mail-admin | for key in compiled_params
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 745, in <genexpr>
mail-admin | for key in compiled_params
mail-admin | File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1201, in process
mail-admin | return process_param(value, dialect)
mail-admin | File "/app/mailu/models.py", line 30, in process_bind_param
mail-admin | return idna.encode(value).decode("ascii").lower()
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 358, in encode
mail-admin | s = alabel(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 270, in alabel
mail-admin | ulabel(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 304, in ulabel
mail-admin | check_label(label)
mail-admin | File "/usr/lib/python3.7/site-packages/idna/core.py", line 261, in check_label
mail-admin | raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
mail-admin | sqlalchemy.exc.StatementError: (idna.core.InvalidCodepoint) Codepoint U+0020 at position 1 of ' test ' not allowed
mail-admin | [SQL: SELECT domain.created_at AS domain_created_at, domain.updated_at AS domain_updated_at, domain.comment AS domain_comment, domain.name AS domain_name, domain.max_users AS domain_max_users, domain.max_aliases AS domain_max_aliases, domain.max_quota_bytes AS domain_max_quota_bytes, domain.signup_enabled AS domain_signup_enabled
mail-admin | FROM domain
mail-admin | WHERE domain.name = %(param_1)s]
mail-admin | [parameters: [{'%(140672597756432 param)s': ' test '}]]
```
````
| Thank you for reporting this bug. You would indeed expect the input to be validated or the whitespace to be stripped. | 2022-02-01T12:09:53 |
|
Mailu/Mailu | 2,253 | Mailu__Mailu-2253 | [
"2239"
] | 9a99f4253b8871f85e5e06fc1eb986d15f1e1887 | diff --git a/core/admin/mailu/utils.py b/core/admin/mailu/utils.py
--- a/core/admin/mailu/utils.py
+++ b/core/admin/mailu/utils.py
@@ -47,7 +47,7 @@ def handle_needs_login():
# DNS stub configured to do DNSSEC enabled queries
resolver = dns.resolver.Resolver()
-resolver.use_edns(0, 0, 1232)
+resolver.use_edns(0, dns.flags.DO, 1232)
resolver.flags = dns.flags.AD | dns.flags.RD
def has_dane_record(domain, timeout=10):
@@ -56,7 +56,6 @@ def has_dane_record(domain, timeout=10):
if result.response.flags & dns.flags.AD:
for record in result:
if isinstance(record, dns.rdtypes.ANY.TLSA.TLSA):
- record.validate()
if record.usage in [2,3] and record.selector in [0,1] and record.mtype in [0,1,2]:
return True
except dns.resolver.NoNameservers:
diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -28,7 +28,7 @@ def test_DNS():
import time
# DNS stub configured to do DNSSEC enabled queries
resolver = dns.resolver.Resolver()
- resolver.use_edns(0, 0, 1232)
+ resolver.use_edns(0, dns.flags.DO, 1232)
resolver.flags = dns.flags.AD | dns.flags.RD
nameservers = resolver.nameservers
for ns in nameservers:
@@ -37,11 +37,11 @@ def test_DNS():
try:
result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)
except Exception as e:
- log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e);
+ log.critical("Your DNS resolver at %s is not working (%s). Please see https://mailu.io/master/faq.html#the-admin-container-won-t-start-and-its-log-says-critical-your-dns-resolver-isn-t-doing-dnssec-validation", ns, e);
else:
if result.response.flags & dns.flags.AD:
break
- log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.", ns)
+ log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please see https://mailu.io/master/faq.html#the-admin-container-won-t-start-and-its-log-says-critical-your-dns-resolver-isn-t-doing-dnssec-validation.", ns)
time.sleep(5)
test_DNS()
| helm/k8s deployments seem to have problems with DNS resolution; Mailu expects validated dnssec records
## Description
As the default k8s dns setup is not working correctly with dnssec, we will (like the docker setup) use unbound as resolver, but to allow us to continue using the internal k8s-dns system, we need a dns forwarder setting.
## Replication Steps
Setup mailu with mailu helm chart in k8s
I will provide a suitable fix myself.
| This is related to #2163
We have talked about it on #mailu-dev recently. My preferred option would be to do the opposite: configure k8s' resolver to use unbound (that would run as a service in a pod) as upstream DNS resolver. The pro of doing it that way is to ensure cluster stability if the unbound pod is having problems...
https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configuration-equivalent-to-kube-dns explains how to configure/change the upstream for the two common cases (coredns or kube-dns). Maybe all we need is some documentation in the FAQ?
@devfaz which DNS resolver is your k8s deployment configured to use? what is its upstream?
Can you run ``dig @<ip> +adflag example.org`` on the various resolvers and track down where you are loosing the AD flag please?
@nextgens did some tests, the adflag got lost asking the dns on my router (openwrt / dnsmasq). I replaced the upstream with google-dns and now im getting the adflag also in k8s.
So, maybe we should change the WARN-msg in admin-pod to include an link to some docs about "how to change your k8s-dns-server to one allowing successfully do dnssec"
Yes, I do believe that it's a documentation problem more than anything else.
For dnsmasq you can make it perform the validation https://wiki.gentoo.org/wiki/Dnsmasq/en#DNSSEC or configure it to pass the validation of upstream servers through.
I'd accept and backport a PR that adds a FAQ entry in the documentation and changes the message to point to it (https://mailu.io/1.9/faq.html#technical-issues)
Thanks for the link, but in openwrt its just a tick in a checkbox ;)
I will try to provide the requested PR.
@nextgens so after undoing my unbound change on my cluster, im getting again the admin-pod-crit:
`
CRITICAL:root:Your DNS resolver at 192.168.102.10 isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.
`
Background infos:
- 192.168.101.1 => dnsmasq on openwrt (dnssec enabled)
- 192.168.102.10 => kube-dns svc ip
- commands executed on debian-pod running in the same namespace.
`
# ask kube-dns for ad-flag => bad
root@bash:/# dig @192.168.102.10 +adflag example.org
; <<>> DiG 9.16.22-Debian <<>> @192.168.102.10 +adflag example.org
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 28083
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; COOKIE: ac945c2f55e83efc (echoed)
;; QUESTION SECTION:
;example.org. IN A
;; ANSWER SECTION:
example.org. 27 IN A 93.184.216.34
;; Query time: 4 msec
;; SERVER: 192.168.102.10#53(192.168.102.10)
;; WHEN: Tue Feb 15 07:50:49 UTC 2022
;; MSG SIZE rcvd: 79
# ask openwrt-dns for ad-flag => good
root@bash:/# dig @192.168.101.1 +adflag example.org
; <<>> DiG 9.16.22-Debian <<>> @192.168.101.1 +adflag example.org
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 57895
;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;example.org. IN A
;; ANSWER SECTION:
example.org. 17625 IN A 93.184.216.34
;; Query time: 4 msec
;; SERVER: 192.168.101.1#53(192.168.101.1)
;; WHEN: Tue Feb 15 07:50:58 UTC 2022
;; MSG SIZE rcvd: 56
# ask kube-dns for dnssec => good.
root@bash:/# dig @192.168.102.10 +dnssec example.org
; <<>> DiG 9.16.22-Debian <<>> @192.168.102.10 +dnssec example.org
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 10926
;; flags: qr aa rd ra ad; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags: do; udp: 4096
; COOKIE: 1388011d98e57af4 (echoed)
;; QUESTION SECTION:
;example.org. IN A
;; ANSWER SECTION:
example.org. 6 IN RRSIG A 8 2 86400 20220222143712 20220201175836 14665 example.org. gVEFxer1XAPl/NbmyRRDWI3eOge/UVXVZ5BN4/62HDwn1NVAtZybjy70 T42FN6nvt9ARWwTWZBs+DF7+ohE66NQNiKYefXapMNdbJS4N6NbpiSn9 ZlkdUQzR/JR2onNvHeUclIYVL9spYJszECTPV5erz7H1g9+k71JJX+7O WFc=
example.org. 6 IN A 93.184.216.34
;; Query time: 0 msec
;; SERVER: 192.168.102.10#53(192.168.102.10)
;; WHEN: Tue Feb 15 07:51:10 UTC 2022
;; MSG SIZE rcvd: 261
# ask openwrt-dns for dnssec-flag => good
root@bash:/# dig @192.168.101.1 +dnssec example.org
; <<>> DiG 9.16.22-Debian <<>> @192.168.101.1 +dnssec example.org
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 49257
;; flags: qr rd ra ad; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags: do; udp: 512
;; QUESTION SECTION:
;example.org. IN A
;; ANSWER SECTION:
example.org. 17457 IN A 93.184.216.34
example.org. 17457 IN RRSIG A 8 2 86400 20220222143712 20220201175836 14665 example.org. gVEFxer1XAPl/NbmyRRDWI3eOge/UVXVZ5BN4/62HDwn1NVAtZybjy70 T42FN6nvt9ARWwTWZBs+DF7+ohE66NQNiKYefXapMNdbJS4N6NbpiSn9 ZlkdUQzR/JR2onNvHeUclIYVL9spYJszECTPV5erz7H1g9+k71JJX+7O WFc=
;; Query time: 4 msec
;; SERVER: 192.168.101.1#53(192.168.101.1)
;; WHEN: Tue Feb 15 07:53:46 UTC 2022
;; MSG SIZE rcvd: 227
`
so seems CoreDNS is still having issues with the ad-flag, but works if you request +DNSSEC. That just FYI - i will try to investigate later.
Seems CoreDNS changed AD-flag behavior in 1.8.5 (im using k8s 1.23.3 which includes CoreDNS 1.8.6): https://coredns.io/2021/09/10/coredns-1.8.5-release/ => https://github.com/coredns/coredns/pull/4736.
I've replied on that PR (probably the wrong place tbh).
coredns ignores https://datatracker.ietf.org/doc/html/rfc6840#section-5.8 ... which makes perfectly clear that DO or AD in the query should be treated the same. This is a coredns bug.
> I've replied on that PR (probably the wrong place tbh).
>
> coredns ignores https://datatracker.ietf.org/doc/html/rfc6840#section-5.8 ... which makes perfectly clear that DO or AD in the query should be treated the same. This is a coredns bug.
Yes, you are right - but how can we ensure mailu is working on k8s even with this bug?
I see 2 ways:
1. Apply the workaround to the start.py and hope the real tools are also setting the DO-flag.
2. Give users an easy way to workaround coreDNS on k8s completly till the bug is fixed. (unbound resolver ;) )
> Yes, you are right - but how can we ensure mailu is working on k8s even with this bug?
You can test it out with something like https://havedane.net/ or try to find out what postfix actually does (share a packet capture)
@micw do you want to shime in?
FYI: seems upstream (coreDNS) is fixing this quickly, so im just doing nothing and waiting for a new coreDNS version. | 2022-02-19T12:38:45 |
|
Mailu/Mailu | 2,254 | Mailu__Mailu-2254 | [
"2138"
] | 9a99f4253b8871f85e5e06fc1eb986d15f1e1887 | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -276,7 +276,7 @@ def dns_tlsa(self):
hostname = app.config['HOSTNAME']
if app.config['TLS_FLAVOR'] in ('letsencrypt', 'mail-letsencrypt'):
# current ISRG Root X1 (RSA 4096, O = Internet Security Research Group, CN = ISRG Root X1) @20210902
- return f'_25._tcp.{hostname}. 86400 IN TLSA 2 1 0 30820222300d06092a864886f70d01010105000382020f003082020a0282020100ade82473f41437f39b9e2b57281c87bedcb7df38908c6e3ce657a078f775c2a2fef56a6ef6004f28dbde68866c4493b6b163fd14126bbf1fd2ea319b217ed1333cba48f5dd79dfb3b8ff12f1219a4bc18a8671694a66666c8f7e3c70bfad292206f3e4c0e680aee24b8fb7997e94039fd347977c99482353e838ae4f0a6f832ed149578c8074b6da2fd0388d7b0370211b75f2303cfa8faeddda63abeb164fc28e114b7ecf0be8ffb5772ef4b27b4ae04c12250c708d0329a0e15324ec13d9ee19bf10b34a8c3f89a36151deac870794f46371ec2ee26f5b9881e1895c34796c76ef3b906279e6dba49a2f26c5d010e10eded9108e16fbb7f7a8f7c7e50207988f360895e7e237960d36759efb0e72b11d9bbc03f94905d881dd05b42ad641e9ac0176950a0fd8dfd5bd121f352f28176cd298c1a80964776e4737baceac595e689d7f72d689c50641293e593edd26f524c911a75aa34c401f46a199b5a73a516e863b9e7d72a712057859ed3e5178150b038f8dd02f05b23e7b4a1c4b730512fcc6eae050137c439374b3ca74e78e1f0108d030d45b7136b407bac130305c48b7823b98a67d608aa2a32982ccbabd83041ba2830341a1d605f11bc2b6f0a87c863b46a8482a88dc769a76bf1f6aa53d198feb38f364dec82b0d0a28fff7dbe21542d422d0275de179fe18e77088ad4ee6d98b3ac6dd27516effbc64f533434f0203010001'
+ return f'_25._tcp.{hostname}. 86400 IN TLSA 2 1 1 0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3'
@property
def dkim_key(self):
diff --git a/core/nginx/config.py b/core/nginx/config.py
--- a/core/nginx/config.py
+++ b/core/nginx/config.py
@@ -34,6 +34,22 @@
"notls": None
}[args["TLS_FLAVOR"]]
+def format_for_nginx(fullchain, output):
+ """ We may want to strip ISRG Root X1 out """
+ if not os.path.exists(fullchain):
+ return
+ split = '-----END CERTIFICATE-----\n'
+ with open(fullchain, 'r') as pem:
+ certs = [f'{cert}{split}' for cert in pem.read().split(split) if cert]
+ if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN'):
+ del certs[-1]
+ with open(output, 'w') as pem:
+ pem.write(''.join(certs))
+
+if args['TLS_FLAVOR'] in ['letsencrypt', 'mail-letsencrypt']:
+ format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')
+ format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')
+
if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
print("Missing cert or key file, disabling TLS")
args["TLS_ERROR"] = "yes"
diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -32,28 +32,11 @@
"--post-hook", "/config.py"
]
-def format_for_nginx(fullchain, output):
- """ We may want to strip ISRG Root X1 out
- """
- certs = []
- with open(fullchain, 'r') as pem:
- cert = ''
- for line in pem:
- cert += line
- if '-----END CERTIFICATE-----' in line:
- certs += [cert]
- cert = ''
- with open(output, 'w') as pem:
- for cert in certs[:-1] if len(certs)>2 and os.getenv('LETSENCRYPT_SHORTCHAIN', default="False") else certs:
- pem.write(cert)
-
# Wait for nginx to start
time.sleep(5)
# Run certbot every day
while True:
subprocess.call(command)
- format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')
subprocess.call(command2)
- format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')
time.sleep(86400)
| Root CA is not included in certificate chain for DANE validation
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
I just upgraded to 1.9 and am trying to integrate DANE validation. I got the following DANE Survey Notice:
"Your TLSA record designates a root CA hash, but, as is common, the root CA is not included in your certificate chain. It would need to be included to work with DANE-TA(2), but simpler to use an intermediate CA hash instead."
## Replication Steps
I set a DNS TLSA record as recommended, with 2 1 1 designation. I have DNSSEC activated for the relevant domain. Certificates are issued through Let's Encrypt.
## Expected behaviour
Full validation of the certificate chain.
## Logs
| Doh. Good catch
@vrjula would you mind trying with what's in [#2140](https://github.com/Mailu/Mailu/pull/2140/files) please?
I swapped my records to the generated "2 1 0" from yesterday's release and today I get the following message from the survey ([https://stats.dnssec-tools.org/](https://stats.dnssec-tools.org/)):
> Not all DANE implementations support "2 1 0" TLSA records
with public keys of root CAs not included in the certificate
chain. See <https://www.rfc-editor.org/rfc/rfc7671.html#section-5.2.2>
>
> For Let's Encrypt, publish the recommended "2 1 1" records instead:
<https://dnssec-stats.ant.isi.edu/~viktor/x3hosts.html>
Grrr, what a mess.
If we pin the intermediaries we need 4 records and they would be valid for at most 3 years... I would rather we stick with what we have or consider more sensible alternatives (like creating a new CA locally -with a lifetime expressed in decades- that we would cross-sign with just for DANE enabled setups)
@csutcliff can you try with this please? If it works we could switch to that instead
```
_443._tcp.mail.example.com IN TLSA 2 0 0 3082056b30820353a0030201020211008210cfb0d240e3594463e0bb63828b00300d06092a864886f70d01010b0500304f310b300906035504061302555331293027060355040a1320496e7465726e65742053656375726974792052657365617263682047726f7570311530130603550403130c4953524720526f6f74205831301e170d3135303630343131303433385a170d3335303630343131303433385a304f310b300906035504061302555331293027060355040a1320496e7465726e65742053656375726974792052657365617263682047726f7570311530130603550403130c4953524720526f6f7420583130820222300d06092a864886f70d01010105000382020f003082020a0282020100ade82473f41437f39b9e2b57281c87bedcb7df38908c6e3ce657a078f775c2a2fef56a6ef6004f28dbde68866c4493b6b163fd14126bbf1fd2ea319b217ed1333cba48f5dd79dfb3b8ff12f1219a4bc18a8671694a66666c8f7e3c70bfad292206f3e4c0e680aee24b8fb7997e94039fd347977c99482353e838ae4f0a6f832ed149578c8074b6da2fd0388d7b0370211b75f2303cfa8faeddda63abeb164fc28e114b7ecf0be8ffb5772ef4b27b4ae04c12250c708d0329a0e15324ec13d9ee19bf10b34a8c3f89a36151deac870794f46371ec2ee26f5b9881e1895c34796c76ef3b906279e6dba49a2f26c5d010e10eded9108e16fbb7f7a8f7c7e50207988f360895e7e237960d36759efb0e72b11d9bbc03f94905d881dd05b42ad641e9ac0176950a0fd8dfd5bd121f352f28176cd298c1a80964776e4737baceac595e689d7f72d689c50641293e593edd26f524c911a75aa34c401f46a199b5a73a516e863b9e7d72a712057859ed3e5178150b038f8dd02f05b23e7b4a1c4b730512fcc6eae050137c439374b3ca74e78e1f0108d030d45b7136b407bac130305c48b7823b98a67d608aa2a32982ccbabd83041ba2830341a1d605f11bc2b6f0a87c863b46a8482a88dc769a76bf1f6aa53d198feb38f364dec82b0d0a28fff7dbe21542d422d0275de179fe18e77088ad4ee6d98b3ac6dd27516effbc64f533434f0203010001a3423040300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e0416041479b459e67bb6e5e40173800888c81a58f6e99b6e300d06092a864886f70d01010b05000382020100551f58a9bcb2a850d00cb1d81a6920272908ac61755c8a6ef882e5692fd5f6564bb9b8731059d321977ee74c71fbb2d260ad39a80bea17215685f1500e59ebcee059e9bac915ef869d8f8480f6e4e99190dc179b621b45f06695d27c6fc2ea3bef1fcfcbd
6ae27f1a9b0c8aefd7d7e9afa2204ebffd97fea912b22b1170e8ff28a345b58d8fc01c954b9b826cc8a8833894c2d843c82dfee965705ba2cbbf7c4b7c74e3b82be31c822737392d1c280a43939103323824c3c9f86b255981dbe29868c229b9ee26b3b573a82704ddc09c789cb0a074d6ce85d8ec9efceabc7bbb52b4e45d64ad026cce572ca086aa595e315a1f7a4edc92c5fa5fbffac28022ebed77bbbe3717b9016d3075e46537c3707428cd3c4969cd599b52ae0951a8048ae4c3907cecc47a452952bbab8fbadd233537de51d4d6dd5a1b1c7426fe64027355ca328b7078de78d3390e7239ffb509c796c46d5b415b3966e7e9b0c963ab8522d3fd65be1fb08c284fe24a8a389daac6ae1182ab1a843615bd31fdc3b8d76f22de88d75df17336c3d53fb7bcb415fffdca2d06138e196b8ac5d8b37d775d533c09911ae9d41c1727584be0241425f67244894d19b27be073fb9b84f817451e17ab7ed9d23e2bee0d52804133c31039edd7a6c8fc60718c67fde478e3f289e0406cfa5543477bdec899be91743df5bdb5ffe8e1e57a2cd409d7e6222dade1827
```
I'm afraid that I can't as it is rejected by my DNS provider (Cloudflare)
> The content length is too large. (Code: 81041)
After the first swap, my DANE validation is now passing:

My favourite option has now become to cross-sign the LE certificate with a CA we would generate locally specifically for DANE. It would be small (ECDSA), could have a very long lifetime and could be used to cross-sign non-LE certs too
But since this is a much bigger piece of work, I'm not sure it would be wise to backport it to 1.9; we'll discuss it at the next meeting I guess. CC @Diman0
> After the first swap, my DANE validation is now passing: 
Mine also passes that test with the "2 1 0" record but the dnssec-tools.org email still suggests that it will fail with some implementations
> My favourite option has now become to cross-sign the LE certificate with a CA we would generate locally specifically for DANE. It would be small (ECDSA), could have a very long lifetime and could be used to cross-sign non-LE certs too
>
> But since this is a much bigger piece of work, I'm not sure it would be wise to backport it to 1.9; we'll discuss it at the next meeting I guess. CC @Diman0
That sounds like a good solution for a future version. In the mean time I've added the 4 records suggested for letsencrypt by https://dnssec-stats.ant.isi.edu/~viktor/x3hosts.html which should last a few years!
> Grrr, what a mess.
>
> If we pin the intermediaries we need 4 records and they would be valid for at most 3 years... I would rather we stick with what we have or consider more sensible alternatives (like creating a new CA locally -with a lifetime expressed in decades- that we would cross-sign with just for DANE enabled setups)
Well, strictly speaking you don't need all four. One is enough if you delay certificate rollover until the new CA hash has been published (along with the current) for a few TTLs.
This requires a more sophisticated rollover process, but that is needed in any case, since, as you note, the four hashes in question are not forever.
DANE requires monitoring, and robust automation that now and then needs to actually update the TLSA records, and wait for remote caches (and secondary servers) to catch up.
My advice would be to not use "2 X X" TLSA records at all, or at least not for low security "DV" CAs like Let's Encrypt. You're far better off with "3 1 1" (which is the approach used by <https://mailinabox.email>).
* The future key is generated, and a matching TLSA record is added to the RRset.
* A delay of a few TTLs/days expires
* A new certificate (perhaps from Let's Encrypt to satisfy non-DANE clients) is deployed with that key.
* The TLSA RR matching the old key is purged.
One way to handle this is to always generate the future key as soon as the new cert is deployed, that way you always have two TLSA RRs (current + next):
https://mail.sys4.de/pipermail/dane-users/2018-February/000440.html
When publishing the next key, drop the now stale previous key at the same time.
Or perhaps four "3 1 1" records if deploying both an RSA and ECDSA cert (not common):
https://mail.sys4.de/pipermail/dane-users/2017-August/000416.html
Finally, I very much DO NOT recommend the DANE TLSA *matching type* `Full(0)`, which places the entire certificate or public key in the TLSA record payload, at least not with RSA keys, which are quite large. This leads to rather hefty DNS UDP payloads, hindering interoperability and increasing response amplification.
Please stick with `SHA2-256(1)` hashes, and let the certs be conveyed over TCP in the TLS handshake. Thus either "3 1 1" or "2 1 1", and avoid all the others.
Below are the survey stats for distinct TLSA RR parameters by base domain:
```
count | usage | selector | mtype
-------+-------+----------+-------
16143 | 3 | 1 | 1
1473 | 2 | 1 | 1
1170 | 3 | 0 | 1
415 | 3 | 1 | 2
206 | 2 | 0 | 1
176 | 3 | 0 | 2
70 | 2 | 1 | 2
24 | 2 | 0 | 2
21 | 2 | 1 | 0 <-----
13 | 1 | 1 | 1
11 | 2 | 0 | 0
10 | 3 | 1 | 0
6 | 0 | 0 | 0
5 | 0 | 1 | 1
4 | 1 | 1 | 2
4 | 0 | 1 | 2
4 | 0 | 0 | 1
4 | 1 | 0 | 1
1 | 1 | 0 | 2
1 | 0 | 0 | 2
1 | 3 | 3 | 3
```
As you can see, "2 1 0" is not at all popular...
> This requires a more sophisticated rollover process, but that is needed in any case, since, as you note, the four hashes in question are not forever.
>
That's why pinning the root CA makes sense; pinning something that expire in decades allows to ignore the rollover problem (for now).
> My advice would be to not use "2 X X" TLSA records at all, or at least not for low security "DV" CAs like Let's Encrypt. You're far better off with "3 1 1" (which is the approach used by https://mailinabox.email).
>
Doing so would mean keeping the same key material and using larger key sizes... We could.
I've changed it from 4096bit RSA to 2048bit RSA to optimize handshake sizes... This obviously wouldn't be compatible with keeping the key material for decades.
> One way to handle this is to always generate the future key as soon as the new cert is deployed, that way you always have two TLSA RRs (current + next):
>
Security-wise this doesn't make much sense; we are not in a scenario where the key material could be kept offline
> Or perhaps four "3 1 1" records if deploying both an RSA and ECDSA cert (not common):
We're already using two types of certs.
You should *not* be keeping any keys live for decades. Since rollover should ultimately happen, far better to do it often and well, than to defer it to some unspecified future date.
Use "3 1 1" keys, rolled every 90 days or so, with prior TLSA updates. You can then use Let's Encrypt, without actually trusting it for DANE, since its domain control challenges are quite weak.
Yes, you can use a "2 1 1" RR matching a local CA (included in the server chain file and sent with the TLS handshake), but that only makes sense for large mail server clusters with a centrally managed PKI. That is, for Google's `mx[1-4].smtp.goog`, but not for SOHO/vanity domain mail servers.
Thank you for taking the time to reply.
We've decided not long ago that maintaining an authoritative DNS server on behalf of our users was out of scope for the project... and so is programmatically managing their DNS records (we were looking at https://github.com/AnalogJ/lexicon/).
Based on the above, I see the following solutions:
1) ~~stick to 210 pinned to ISRG X1 and assume the fact that we don't go for the popular choice.~~ This isn't an option, see below
2) stop encouraging our users to deploy DANE/TLSA records; those that want them can of course still do it but at least we won't be advocating for "known problematic/exotic" configuration choices
3) use 311 records, stop changing the key material on certificate re-issuance; The rationale being that the security margin offered by a 4096 bit RSA certificate is likely good enough for the next decade (https://www.keylength.com/en/3/). We could warn about certificate age and let the user manage the rollover on his own time
4) use 211 records pinned to the intermediaries (E1 E2 R3 R4 today); We know when they are due to expire so we could display a big fat red warning in advance of their expiry... but it will be a recurring headache with a 3y periodicity at best.
5) use 211 records pinned to ISRG X1 (revert to the state before this ticket), and send ISRG X1 in the handshake (4096 extra bits! we were not doing that but we could accept that cost).
6) use 211 matching a local CA that we would send in the handshake (manage our own local PKI). The advantages over the previous solution being: 1) we have agility in terms of which CA is used and we could support user-managed certificates (and make DANE available to more users) 2) the CA would likely be using ECC and its certificate would be smaller than ISRG X1
This issue is NOT resolved by #2140, and needs to be reopened. The change in https://github.com/Mailu/Mailu/blob/master/core/admin/mailu/models.py#L273-L279
puts the root CA's *key* (not full certificate) in the TLSA record.
Use of bare "2 1 0" keys (without the corresponding cert in the chain, making the full key in DNS redundant, and "2 1 1" a better choice) violates [RFC7671 Section 5.2.3](https://datatracker.ietf.org/doc/html/rfc7671#section-5.2.3):
```
Thus, servers cannot rely on "DANE-TA(2) SPKI(1) Full(0)" TLSA
records to be sufficient to authenticate chains issued by the
associated public key in the absence of a corresponding certificate
in the server's TLS certificate message. Servers employing "2 1 0"
TLSA records MUST include the corresponding TA certificate in their
certificate chain.
```
> We've decided not long ago that maintaining an authoritative DNS server on behalf of our users was out of scope for the project... and so is programmatically managing their DNS records (we were looking at https://github.com/AnalogJ/lexicon/).
So, you've decided to not attempt to automate DNS TLSA record updates, with DANE supported only via long-term static data... [ If this is wrong, please clarify ]
> Based on the above, I see the following solutions:
>
> 1. stick to 210 pinned to ISRG X1 and assume the fact that we don't go for the popular choice.
Popularity isn't the problem, it is *interoperability*, I don't expect Microsoft's or Cisco's (or pick some other random implementation that isn't mine, i.e. Postfix, Exim or OpenSSL) DANE verification stack to support bare "2 1 0" keys in the absence of a matching cert in the chain. IIRC I've seen DANE verification code in "Go" that would fail to handle this...
My survey code also chooses to not support "2 1 0" except via a matching chain cert, and users with "2 1 0" TLSA records get nagging email from the survey...
> 2. stop encouraging our users to deploy DANE/TLSA records; those that want them can of course still do it but at least we won't be advocating for "known problematic/exotic" configuration choices
That's an option of course, though ideally I'd like to see turnkey solutions that support DANE, which is often better than users doing their own DANE integration, not always in a robust manner...
> 3. use 311 records, stop changing the key material on certificate re-issuance; The rationale being that the security margin offered by an a 4096 bit RSA certificate is likely good enough for the next decade (https://www.keylength.com/en/3/)
This is certainly a popular choice. And you don't need 4096-bit RSA keys to get there, 2048-bits is plenty resistant to brute force attacks (even 1536 bits is well outside any plausible classical attack). Indeed with the DNS root zone signed with a long-term 2048-bit RSA key, using 4096-bit RSA keys lower down the stack is rather futile.
> 4. use 211 records pinned to the intermediaries (E1 E2 R3 R4 today); We know when they are due to expire so we could display a big fat red warning in advance of their expiry... but it will be a recurring headache with a 3y periodicity at best.
That is plausible, but there's no good reason to trust LE issuers, this just reduces security. That does not mean that one should not deploy LE certs to support non-DANE relying parties (or even, not recommended, MTA-STS).
> 5. use 211 records pinned to ISRG X1 (revert to the state before this ticket), and send ISRG X1 in the handshake (4096 extra bits! we were not doing that but we could accept that cost).
SMTP is not latency sensitive, there's no interactive user loading a webpage. There's little reason to avoid sending the root cert, at least on port 25. You might have a case for port 465 or 587, with handheld devices sending email, but these could use a separate config sans root cert (those MUAs don't presently do DANE).
Thus, sending the X1 root is fine. (Note that for submission, if you want to support old Android devices, what gets sent is a cross-cert for ISRG, signed by an expired DST root, but Android ignores root CA expiration).
> 6. use 211 matching a local CA that we would send in the handshake (manage our own local PKI). The advantages over the previous solution being: 1) we have agility in terms of which CA is used and we could support user-managed certificates (and make DANE available to more users) 2) the CA would likely be using ECC and its certificate smaller than ISRG X1
This is also fine, but not clear why it is better than "3 1 1", unless the same CA signs lots of keys, is kept off-line or in an HSM, is resistant to misissuance, ...
A "3 1 1" is basically CA + server key all in one, and compatible with actually using an LE cert (for a key that is stable while the TLSA RRset is unchanged).
I should perhaps mention that 3 years is not necessarily the upper bound on the validity of the key hashes for R3/E1/R4/E2. The intermediate issuer certificates may well (this has happened previously) be renewed with the same underlying key, just new expiration dates, and various other metadata. Time will tell... | 2022-02-19T13:32:33 |
|
Mailu/Mailu | 2,255 | Mailu__Mailu-2255 | [
"2213"
] | 9a99f4253b8871f85e5e06fc1eb986d15f1e1887 | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -74,9 +74,10 @@ def is_valid_postconf_line(line):
else:
conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml")
-if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"):
- open("/etc/postfix/tls_policy.map", "a").close()
- os.system("postmap /etc/postfix/tls_policy.map")
+for policy in ['tls_policy', 'transport']:
+ if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):
+ open(f'/etc/postfix/{policy}.map', 'a').close()
+ os.system(f'postmap /etc/postfix/{policy}.map')
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
| Provide a "slow" transport for Postfix
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
Orange, a mainstream french ISP, and a few others, have a rate limit : without a slow transport, I get deferred messages with this : "Too many connections, slow down." It is a known issue https://blog.network-studio.fr/2011/06/30/too-many-connections-slow-down/
I managed to get it done with the overrides/ files :
overrides/postfix.cf :
```
transport_maps = socketmap:unix:/tmp/podop.socket:transport lmdb:/etc/postfix/transport.map
slow_destination_concurrency_limit = 1
slow_destination_recipient_limit = 20
slow_destination_rate_delay = 5s
slow_destination_concurrency_failed_cohort_limit=10
```
overrides/postfix.master :
```
slow/unix= slow unix - - n - 5 smtp -o syslog_name=postfix-slow
```
overrides/transport.map :
```
wanadoo.com slow:
wanadoo.fr slow:
orange.com slow:
orange.fr slow:
laposte.net slow:
free.fr slow:
hotmail.fr slow:
outlook.fr slow:
yahoo.fr slow:
```
I did not have time to fully test it, but it seems to work. Configuration values may need a fine tuning...
It would be nice to have such "slow" transport built in in Mailu, with an override possibility to edit the domain list.
| 2022-02-20T12:32:34 |
||
Mailu/Mailu | 2,265 | Mailu__Mailu-2265 | [
"2261"
] | 2e9b14d536b69d4c7b5fe0cd4fb7de3d1c069126 | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -93,8 +93,8 @@ def handle_authentication(headers):
app.logger.warn(f'Received undecodable user/password from nginx: {raw_user_email!r}/{raw_password!r}')
else:
try:
- user = models.User.query.get(user_email)
- is_valid_user = True
+ user = models.User.query.get(user_email) if '@' in user_email else None
+ is_valid_user = user is not None
except sqlalchemy.exc.StatementError as exc:
exc = str(exc).split('\n', 1)[0]
app.logger.warn(f'Invalid user {user_email!r}: {exc}')
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -12,7 +12,7 @@ def nginx_authentication():
"""
client_ip = flask.request.headers["Client-Ip"]
headers = flask.request.headers
- if headers["Auth-Port"] == '25' and headers['Auth-Method'] == 'plain':
+ if headers["Auth-Port"] == '25':
response = flask.Response()
response.headers['Auth-Status'] = 'AUTH not supported'
response.headers['Auth-Error-Code'] = '502 5.5.1'
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py
--- a/core/admin/mailu/internal/views/postfix.py
+++ b/core/admin/mailu/internal/views/postfix.py
@@ -5,6 +5,7 @@
import flask
import idna
import re
+import sqlalchemy.exc
import srslib
@internal.route("/postfix/dane/<domain_name>")
@@ -158,18 +159,13 @@ def postfix_sender_rate(sender):
def postfix_sender_access(sender):
""" Simply reject any sender that pretends to be from a local domain
"""
- if not is_void_address(sender):
- localpart, domain_name = models.Email.resolve_domain(sender)
- return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404)
- else:
- return flask.abort(404)
-
-
-def is_void_address(email):
- '''True if the email is void (null) email address.
- '''
- if email.startswith('<') and email.endswith('>'):
- email = email[1:-1]
- # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's
- # consider void any such thing.
- return '@' not in email
+ if '@' in sender:
+ if sender.startswith('<') and sender.endswith('>'):
+ sender = sender[1:-1]
+ try:
+ localpart, domain_name = models.Email.resolve_domain(sender)
+ if models.Domain.query.get(domain_name):
+ return flask.jsonify("REJECT")
+ except sqlalchemy.exc.StatementError:
+ pass
+ return flask.abort(404)
| Question: Is login possible via port 25
If only port 25 is open to the world, is it possible for a brute force password attack to occur?
I'm seeing messages but someone told me the errors can't be trusted, that there is no actual way to log in via port 25. But if this were true why would the error message indicate different?
```
2022/03/02 06:04:22 [info] 13#13: *89499 client 31.210.20.189:60509 connected to 0.0.0.0:25
2022/03/02 06:04:22 [error] 13#13: *89499 31.210.20.189 could not be resolved (3: Host not found) while in resolving client address, client: 31.210.20.189, server: 0.0.0.0:25
127.0.0.1 - - [02/Mar/2022:06:04:23 +0000] "GET /auth/email HTTP/1.0" 200 0 "-" "-"
2022/03/02 06:04:23 [info] 13#13: *89499 client login failed: "Authentication credentials invalid" while in http auth state, client: 31.210.20.189, server: 0.0.0.0:25, login: "database"
```
| Can you fetch the corresponding logs from the admin container?
https://github.com/Mailu/Mailu/blob/2e9b14d536b69d4c7b5fe0cd4fb7de3d1c069126/core/admin/mailu/internal/views/auth.py#L15 is what should prevent login on port 25... The attacker may be using a method that isn't "plain"... but last I have checked the nginx module was "normalizing" all attempts. We could probably drop the second condition in any case
I'm experiencing similar logs. However, there's no new messages in the admin container since startup. None of the other containers show anything relavant, either.
I will need a packet capture and the exact version of what you are running (``cat /version``) to debug this... You can generate one using ``tcpdump -w /tmp/25.pcap -n -s 0 tcp port 25``. Assuming the attacker is always using the same source IPs you can add something like `` and net 31.210.20.0/24`` to filter out unrelated traffic.
From what I can see:
1) no AUTH method is advertised on port 25 (so no RFC compliant client should attempt to AUTH)
2) we reject any "valid" attempt
Of course there are a zillion of corner cases (like if the attacker does STARTTLS, uses something that isn't AUTH/PLAIN, sends invalid data) so that's why a packet capture would help.
PLAIN using valid credentials:
```
$nc -C test.mailu.io 25
220 test.mailu.io ESMTP ready
EHLO client.example.com
250-test.mailu.io
250-PIPELINING
250-SIZE
250-2000000
250-ETRN
250-ENHANCEDSTATUSCODES
250-8BITMIME
250-DSN
250 STARTTLS
AUTH PLAIN
334
AGFkbWluQHRlc3QubWFpbHUuaW8AbGV0bWVpbg==
502 5.5.1 AUTH not supported
QUIT
$nc -C test.mailu.io 25
220 test.mailu.io ESMTP ready
EHLO client.example.com
250-test.mailu.io
250-PIPELINING
250-SIZE
250-2000000
250-ETRN
250-ENHANCEDSTATUSCODES
250-8BITMIME
250-DSN
250 STARTTLS
AUTH PLAIN AGFkbWluQHRlc3QubWFpbHUuaW8AbGV0bWVpbg==
502 5.5.1 AUTH not supported
QUIT
```
LOGIN using valid credentials:
```
$nc -C test.mailu.io 25
220 test.mailu.io ESMTP ready
EHLO client.example.com
250-test.mailu.io
250-PIPELINING
250-SIZE
250-2000000
250-ETRN
250-ENHANCEDSTATUSCODES
250-8BITMIME
250-DSN
250 STARTTLS
AUTH LOGIN
334 VXNlcm5hbWU6
YWRtaW5AdGVzdC5tYWlsdS5pbwo=
334 UGFzc3dvcmQ6
bGV0bWVpbgo=
502 5.5.1 AUTH not supported
QUIT
``` | 2022-03-05T13:54:02 |
|
Mailu/Mailu | 2,275 | Mailu__Mailu-2275 | [
"2274"
] | fe7397bedf2dd3d611f9f2837a1253133aa0b4f2 | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -12,7 +12,7 @@ def nginx_authentication():
"""
client_ip = flask.request.headers["Client-Ip"]
headers = flask.request.headers
- if headers["Auth-Port"] == '25':
+ if headers["Auth-Port"] == '25' and headers['Auth-Method'] != 'none':
response = flask.Response()
response.headers['Auth-Status'] = 'AUTH not supported'
response.headers['Auth-Error-Code'] = '502 5.5.1'
| master can't receive mail β "AUTH not supported"
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
`master`
## Description
Not able to receive email.
Error `502 5.5.1 AUTH not supported (in reply to RCPT TO command)`
## Replication Steps
1) Spin up a `master` instance, using setup.mailu.io
2) Send an E-Mail to the newly created admin on the instance
3) (Make sure it's nothing on your end, set up a `1.9` instance the same way. Works without a problem.)
## Logs
On the receiving side (mailu:master).
```
front_1 | 2022/03/09 16:27:12 [info] 19#19: *107 client [removed-ipv4]:38640 connected to 0.0.0.0:25
front_1 | 2022/03/09 16:27:13 [info] 19#19: *107 client login failed: "AUTH not supported" while in http auth state, client: [removed-ipv4] using starttls, server: 0.0.0.0:25
```
And on the sending side (mailu:1.9 - my production instance) I get this email:
```
This is the mail system at host [my-domain].
I'm sorry to have to inform you that your message could not
be delivered to one or more recipients. It's attached below.
For further assistance, please send mail to postmaster.
If you do so, please include this problem report. You can
delete your own text from the attached returned message.
The mail system
<admin@[destination.mail]>: host [destination.mail][removed-ipv4] said:
502 5.5.1 AUTH not supported (in reply to RCPT TO command)
Reporting-MTA: dns; [my-domain]
X-Postfix-Queue-ID: 027AA1198C9
X-Postfix-Sender: rfc822; [my-mail]
Arrival-Date: Wed, 9 Mar 2022 17:27:11 +0100 (CET)
Final-Recipient: rfc822; admin@[destination.mail]
Original-Recipient: rfc822;admin@[destination.mail]</a>
Action: failed
Status: 5.5.1
Remote-MTA: dns; [destination.mail]
Diagnostic-Code: smtp; 502 5.5.1 AUTH not supported
```
Same thing (but differently formatted) with gmail, too.
Probably related to #2265 and #2261
*Edit:* Maybe this commit, too: https://github.com/Mailu/Mailu/pull/2265/commits/7ce7f2096b530376af4944a98bd6edc276cd648e
| `git revert 7ce7f2096b530376af4944a98bd6edc276cd648e` (https://github.com/Mailu/Mailu/commit/7ce7f2096b530376af4944a98bd6edc276cd648e)
Fixes this issue. Make of that what you want.
(*Edit:* I have a docker image called `niduroki/mailu-admin:revert` implementing this little change, the configuration from above thus changes the admin-image from `mailu/admin:master` to `niduroki/mailu-admin:revert`)
Can you make a packet capture and paste here the request made by nginx to admin please? (to /internal/auth/email)
Not too certain if this is right - I'm not too versed with tcpdump.
(Output of `tcpdump -vvv -s0` in mailu_admin_1 β this is the only entry related to `/auth/email` β using the official `mailu/admin:master`)
```
18:08:34.214362 IP (tos 0x0, ttl 64, id 24944, offset 0, flags [DF], proto TCP (6), length 502)
mailu_front_1.mailu_default.36244 > 3c7905b538eb.80: Flags [P.], cksum 0x1941 (incorrect -> 0xac8b), seq 1:451, ack 1, win 502, options [nop,nop,TS val 2773252836 ecr 321669164], length 450: HTTP, length: 450
GET /internal/auth/email HTTP/1.0
Host: 192.168.203.4
Connection: close
Auth-Method: none
Auth-User:
Auth-Pass:
Auth-Protocol: smtp
Auth-Login-Attempt: 1
Client-IP: [production-server-ip4]
Client-Host: [production-server-hostname]
Auth-SMTP-Helo: [production-server-hostname]
Auth-SMTP-From: MAIL FROM:<chris@[production-server-hostname]> SIZE=403 BODY=8BITMIME
Auth-SMTP-To: RCPT TO:<admin@[testserver-hostname]> ORCPT=rfc822;admin@[testserver-hostname]
Auth-SSL: on
Auth-Port: 25
``` | 2022-03-09T18:31:54 |
|
Mailu/Mailu | 2,284 | Mailu__Mailu-2284 | [
"2283"
] | 0b25854de0e72d4e8c0f042c8017b53d9ca5a0ab | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -94,11 +94,11 @@ def handle_authentication(headers):
else:
try:
user = models.User.query.get(user_email) if '@' in user_email else None
- is_valid_user = user is not None
except sqlalchemy.exc.StatementError as exc:
exc = str(exc).split('\n', 1)[0]
app.logger.warn(f'Invalid user {user_email!r}: {exc}')
else:
+ is_valid_user = user is not None
ip = urllib.parse.unquote(headers["Client-Ip"])
if check_credentials(user, password, ip, protocol, headers["Auth-Port"]):
server, port = get_server(headers["Auth-Protocol"], True)
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -32,7 +32,7 @@ def nginx_authentication():
for key, value in headers.items():
response.headers[key] = str(value)
is_valid_user = False
- if response.headers.get("Auth-User-Exists"):
+ if response.headers.get("Auth-User-Exists") == "True":
username = response.headers["Auth-User"]
if utils.limiter.should_rate_limit_user(username, client_ip):
# FIXME could be done before handle_authentication()
| AUTH_RATELIMIT_IP not working on imap/pop3/smtp
## Environment & Versions
### Environment
- [x] docker-compose
- [x] kubernetes
- [x] docker swarm (not tested)
### Versions
- 1.9
## Description
Currently, `AUTH_RATELIMIT_IP` seems to have no effect when logging in using imap/pop3/smtp.
## Replication Steps
1. Start a new (local) mailu server (no certs needed) using the web generator and set `AUTH_RATELIMIT_IP` to `3/hour`.
2. `telnet localhost 143` -> `login [email protected] PASSWORD`
3. repeat step 2 three times.
## Expected behaviour
On the last login, the server should respond with `. NO Temporary authentication failure (rate-limit)`.
Instead, it's possible to to repeat the login attempt, until `AUTH_RATELIMIT_USER` hits.
## Additional information
When the user gets into `AUTH_RATELIMIT_IP` using the web interface, imap/pop3/smtp login attempts are stopped as expected.
## Logs
```
β― telnet localhost 143
Telnet escape character is '^Q'.
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^Q'.
* OK IMAP4 ready
. login [email protected] PASSWORD
. NO Authentication credentials invalid
Connection closed by foreign host.
β― telnet localhost 143
Telnet escape character is '^Q'.
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^Q'.
* OK IMAP4 ready
. login [email protected] PASSWORD
. NO Authentication credentials invalid
Connection closed by foreign host.
β― telnet localhost 143
Telnet escape character is '^Q'.
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^Q'.
* OK IMAP4 ready
. login [email protected] PASSWORD
. NO Authentication credentials invalid
Connection closed by foreign host.
β― telnet localhost 143
Telnet escape character is '^Q'.
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^Q'.
* OK IMAP4 ready
. login [email protected] PASSWORD
. NO Authentication credentials invalid
Connection closed by foreign host.
```
I'll provide a PR.
| 2022-03-17T19:38:43 |
||
Mailu/Mailu | 2,299 | Mailu__Mailu-2299 | [
"2296"
] | c15e4e6015592735fa6f730af72b8332e93ae672 | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -5,6 +5,7 @@
import flask
import flask_login
import base64
+import sqlalchemy.exc
@internal.route("/auth/email")
def nginx_authentication():
@@ -96,13 +97,19 @@ def basic_authentication():
response.headers["WWW-Authenticate"] = 'Basic realm="Authentication rate limit for this username exceeded"'
response.headers['Retry-After'] = '60'
return response
- user = models.User.query.get(user_email)
- if user and nginx.check_credentials(user, password.decode('utf-8'), client_ip, "web"):
- response = flask.Response()
- response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, user.email, "")
- utils.limiter.exempt_ip_from_ratelimits(client_ip)
- return response
- utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)
+ try:
+ user = models.User.query.get(user_email) if '@' in user_email else None
+ except sqlalchemy.exc.StatementError as exc:
+ exc = str(exc).split('\n', 1)[0]
+ app.logger.warn(f'Invalid user {user_email!r}: {exc}')
+ else:
+ if user is not None and nginx.check_credentials(user, password.decode('utf-8'), client_ip, "web"):
+ response = flask.Response()
+ response.headers["X-User"] = models.IdnaEmail.process_bind_param(flask_login, user.email, "")
+ utils.limiter.exempt_ip_from_ratelimits(client_ip)
+ return response
+ # We failed check_credentials
+ utils.limiter.rate_limit_user(user_email, client_ip) if user else utils.limiter.rate_limit_ip(client_ip)
response = flask.Response(status=401)
response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"'
return response
| ADMIN: Autentication is locked if error is thrown
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
```
1.9.23
```
## Description
The when the basic auth api throw an error make nginx enter an "invalid" state where it think the it has successfully auteticated.
a thorw-catch block would solve this issue.
## Replication Steps
- Access `https://mail.example.com/webdav/` and login with username only (no `@example.com`)
- Now you cannot "log out". The page is completly stuck. Clearing cache/cookie does nothing.
## Expected behaviour
I would expect an auth rejection. So I can properly login with the right user.
## Logs
```
[2022-03-26 15:48:38,308] ERROR in app: Exception on /internal/auth/basic [GET]
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1699, in _execute_context
context = constructor(
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 1076, in _init_compiled
param = {
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 1077, in <dictcomp>
key: processors[key](compiled_params[key])
File "/usr/lib/python3.9/site-packages/sqlalchemy/sql/type_api.py", line 1379, in process
return process_param(value, dialect)
File "/app/mailu/models.py", line 63, in process_bind_param
raise ValueError('invalid email address (no "@")')
ValueError: invalid email address (no "@")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/flask/app.py", line 2073, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python3.9/site-packages/flask/app.py", line 1518, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python3.9/site-packages/flask/app.py", line 1516, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python3.9/site-packages/flask/app.py", line 1502, in dispatch_request
return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
File "/app/mailu/internal/views/auth.py", line 99, in basic_authentication
user = models.User.query.get(user_email)
File "<string>", line 2, in get
File "/usr/lib/python3.9/site-packages/sqlalchemy/util/deprecations.py", line 390, in warned
return fn(*args, **kwargs)
File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/query.py", line 943, in get
return self._get_impl(ident, loading.load_on_pk_identity)
File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/query.py", line 947, in _get_impl
return self.session._get_impl(
File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 2869, in _get_impl
return db_load_fn(
File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/loading.py", line 524, in load_on_pk_identity
session.execute(
File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 1689, in execute
result = conn._execute_20(statement, params or {}, execution_options)
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1611, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/usr/lib/python3.9/site-packages/sqlalchemy/sql/elements.py", line 325, in _execute_on_connection
return connection._execute_clauseelement(
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1478, in _execute_clauseelement
ret = self._execute_context(
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1705, in _execute_context
self._handle_dbapi_exception(
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 2023, in _handle_dbapi_exception
util.raise_(
File "/usr/lib/python3.9/site-packages/sqlalchemy/util/compat.py", line 207, in raise_
raise exception
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/base.py", line 1699, in _execute_context
context = constructor(
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 1076, in _init_compiled
param = {
File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/default.py", line 1077, in <dictcomp>
key: processors[key](compiled_params[key])
File "/usr/lib/python3.9/site-packages/sqlalchemy/sql/type_api.py", line 1379, in process
return process_param(value, dialect)
File "/app/mailu/models.py", line 63, in process_bind_param
raise ValueError('invalid email address (no "@")')
sqlalchemy.exc.StatementError: (builtins.ValueError) invalid email address (no "@")
[SQL: SELECT "user".email AS user_email, "user".created_at AS user_created_at, "user".updated_at AS user_updated_at, "user".comment AS user_comment, "user".localpart AS user_localpart, "user".password AS user_password, "user".quota_bytes AS user_quota_bytes, "user".quota_bytes_used AS user_quota_bytes_used, "user".global_admin AS user_global_admin, "user".enabled AS user_enabled, "user".enable_imap AS user_enable_imap, "user".enable_pop AS user_enable_pop, "user".forward_enabled AS user_forward_enabled, "user".forward_destination AS user_forward_destination, "user".forward_keep AS user_forward_keep, "user".reply_enabled AS user_reply_enabled, "user".reply_subject AS user_reply_subject, "user".reply_body AS user_reply_body, "user".reply_startdate AS user_reply_startdate, "user".reply_enddate AS user_reply_enddate, "user".displayed_name AS user_displayed_name, "user".spam_enabled AS user_spam_enabled, "user".spam_threshold AS user_spam_threshold, "user".domain_name AS user_domain_name
```
| Odds are it's something with your user-agent... nginx should eventually reply with something (most likely an HTTP 400)
Does browsing to https://[email protected]/webdav/ fix it? | 2022-03-27T11:24:37 |
|
Mailu/Mailu | 2,397 | Mailu__Mailu-2397 | [
"2393"
] | e50f6c58c0f81f639295033463584dab92f16808 | diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py
--- a/core/admin/mailu/internal/views/postfix.py
+++ b/core/admin/mailu/internal/views/postfix.py
@@ -33,8 +33,9 @@ def postfix_alias_map(alias):
localpart, domain_name = models.Email.resolve_domain(alias)
if localpart is None:
return flask.jsonify(domain_name)
- destination = models.Email.resolve_destination(localpart, domain_name)
- return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
+ if destinations := models.Email.resolve_destination(localpart, domain_name):
+ return flask.jsonify(",".join(idna_encode(destinations)))
+ return flask.abort(404)
@internal.route("/postfix/transport/<path:email>")
def postfix_transport(email):
@@ -142,9 +143,11 @@ def postfix_sender_login(sender):
if localpart is None:
return flask.jsonify(",".join(wildcard_senders)) if wildcard_senders else flask.abort(404)
localpart = localpart[:next((i for i, ch in enumerate(localpart) if ch in flask.current_app.config.get('RECIPIENT_DELIMITER')), None)]
- destination = models.Email.resolve_destination(localpart, domain_name, True)
- destination = [*destination, *wildcard_senders] if destination else [*wildcard_senders]
- return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
+ destinations = models.Email.resolve_destination(localpart, domain_name, True) or []
+ destinations.extend(wildcard_senders)
+ if destinations:
+ return flask.jsonify(",".join(idna_encode(destinations)))
+ return flask.abort(404)
@internal.route("/postfix/sender/rate/<path:sender>")
def postfix_sender_rate(sender):
@@ -169,3 +172,11 @@ def postfix_sender_access(sender):
except sqlalchemy.exc.StatementError:
pass
return flask.abort(404)
+
+# idna encode domain part of each address in list of addresses
+def idna_encode(addresses):
+ return [
+ f"{localpart}@{idna.encode(domain).decode('ascii')}"
+ for (localpart, domain) in
+ (address.rsplit("@", 1) for address in addresses)
+ ]
diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -439,10 +439,15 @@ def resolve_destination(cls, localpart, domain_name, ignore_forward_keep=False):
localpart_stripped = None
stripped_alias = None
- delim = os.environ.get('RECIPIENT_DELIMITER')
- if delim in localpart:
- localpart_stripped = localpart.rsplit(delim, 1)[0]
+ if delims := os.environ.get('RECIPIENT_DELIMITER'):
+ try:
+ pos = next(i for i, c in enumerate(localpart) if c in delims)
+ except StopIteration:
+ pass
+ else:
+ localpart_stripped = localpart[:pos]
+ # is localpart@domain_name or localpart_stripped@domain_name an user?
user = User.query.get(f'{localpart}@{domain_name}')
if not user and localpart_stripped:
user = User.query.get(f'{localpart_stripped}@{domain_name}')
@@ -450,19 +455,18 @@ def resolve_destination(cls, localpart, domain_name, ignore_forward_keep=False):
if user:
email = f'{localpart}@{domain_name}'
- if user.forward_enabled:
- destination = user.forward_destination
- if user.forward_keep or ignore_forward_keep:
- destination.append(email)
- else:
- destination = [email]
+ if not user.forward_enabled:
+ return [email]
+ destination = user.forward_destination
+ if user.forward_keep or ignore_forward_keep:
+ destination.append(email)
return destination
- pure_alias = Alias.resolve(localpart, domain_name)
-
- if pure_alias and not pure_alias.wildcard:
- return pure_alias.destination
+ # is localpart, domain_name or localpart_stripped@domain_name an alias?
+ if pure_alias := Alias.resolve(localpart, domain_name):
+ if not pure_alias.wildcard:
+ return pure_alias.destination
if stripped_alias := Alias.resolve(localpart_stripped, domain_name):
return stripped_alias.destination
| Alternative domain of unicode domain results in "Bad adress syntax"
## Environment & Versions
### Environment
- [x] docker-compose
- ~~[ ] kubernetes~~
- ~~[ ] docker swarm~~
### Versions
Version 1.9
## Description
My setup includes a domain that has Unicode characters (i.e. German umlauts), e.g. `exΓ€mple.com`. There is an alternative domain that provides a 'fallback' so users can also send emails to `example.com`. The domain has an account `mail@exΓ€mple.com` which is able to receive emails as expected. When sending an email to `[email protected]`, the sender receives an error message provided with the Postfix error message:
```
<mail@ex??mple.com> (expanded from <[email protected]>): bad address syntax
```
I suspect the cause might be that when looking up the alternative domain, the unicode version is provided to Postfix instead of the IDNA code (`xn--exmple-cua.com`).
It should be noted that my setup doesn't have a top-level domain with an umlaut but instead a subdomain with an umlaut (e.g. `ΓΌmlaut.example.com`). I didn't have a way to test it using a domain that contains any unicode characters itself.
## Replication Steps
1. Create a new mail domain containing an umlaut (e.g. `exΓ€mple.com`)
2. Add an account to this domain
3. Create an alternative for this domain that doesn't contain any umlaut (e.g. `example.com`)
4. Send an email to the account using the alternative domain
## Expected behaviour
The email sent to the alternative domain should be received without any error and should appear in the account's inbox (which has a primary domain with an umlaut).
## Logs
Postfix logs:
````markdown
smtp_1 | INFO:root:Connect
smtp_1 | DEBUG:root:Received bytearray(b'sendermap [email protected]')
smtp_1 | DEBUG:root:Request sendermap/[email protected]
smtp_1 | DEBUG:root:Table get [email protected]
smtp_1 | DEBUG:root:Replying b'NOTFOUND '
smtp_1 | DEBUG:root:Received bytearray(b'recipientmap [email protected]')
smtp_1 | DEBUG:root:Request recipientmap/[email protected]
smtp_1 | DEBUG:root:Table get [email protected]
smtp_1 | DEBUG:root:Replying b'NOTFOUND '
smtp_1 | DEBUG:root:Received bytearray(b'alias [email protected]')
smtp_1 | DEBUG:root:Request alias/[email protected]
smtp_1 | DEBUG:root:Table get [email protected]
smtp_1 | DEBUG:root:Table get [email protected] is recipient@umlΓ€ut.example.com
smtp_1 | DEBUG:root:Replying b'OK recipient@uml\xc3\xa4ut.example.com'
smtp_1 | DEBUG:root:Received bytearray(b'alias recipient@uml\xc3\xa4ut.example.com')
smtp_1 | DEBUG:root:Request alias/recipient@umlΓ€ut.example.com
smtp_1 | DEBUG:root:Table get recipient@umlΓ€ut.example.com
smtp_1 | DEBUG:root:Table get recipient@umlΓ€ut.example.com is recipient@umlΓ€ut.example.com
smtp_1 | DEBUG:root:Replying b'OK recipient@uml\xc3\xa4ut.example.com'
smtp_1 | 2022-07-26T16:25:19.988453+00:00 dc2c0dc34192 postfix/cleanup[3824]: E3B23192FB03: message-id=<####@example.com>
smtp_1 | 2022-07-26T16:25:20.081615+00:00 dc2c0dc34192 postfix/qmgr[339]: E3B23192FB03: from=<[email protected]>, size=281, nrcpt=1 (queue active)
smtp_1 | 2022-07-26T16:25:20.083187+00:00 dc2c0dc34192 postfix/smtpd[3821]: disconnect from mailu_webmail_1.mailu_default[192.168.203.7] ehlo=2 xclient=0/1 mail=1 rcpt=1 data=1 quit=1 commands=6/7
smtp_1 | 2022-07-26T16:25:20.103349+00:00 dc2c0dc34192 postfix/error[3825]: E3B23192FB03: to=<recipient@uml??ut.example.com>, orig_to=<[email protected]>, relay=none, delay=0.36, delays=0.34/0.01/0/0.01, dsn=5.1.3, status=bounced (bad address syntax)
````
Error message provided by Postfix:
```
This is the mail system at host mail.example.com.
I'm sorry to have to inform you that your message could not
be delivered to one or more recipients. It's attached below.
For further assistance, please send mail to postmaster.
If you do so, please include this problem report. You can
delete your own text from the attached returned message.
The mail system
<recipient@uml??ut.example.com> (expanded from
<[[email protected]](mailto:[email protected])>): bad address syntax
Reporting-MTA: dns; mail.example.com
X-Postfix-Queue-ID: E3B23192FB03
X-Postfix-Sender: rfc822; [[email protected]](mailto:[email protected])
Arrival-Date: Tue, 26 Jul 2022 16:25:19 +0000 (UTC)
Final-Recipient: rfc822; recipient@uml??ut.example.com
Original-Recipient: rfc822;[[email protected]](mailto:[email protected])
Action: failed
Status: 5.1.3
Diagnostic-Code: X-Postfix; bad address syntax
```
| This should work. IDNA encoding should already be used internally. Apparently there is an edge case.
Do you also receive this error if you send an email to the domain with the umlaut?
If you remove the alternative and send the email to the domain with the umlaut, do you still receive an error?
This can be fixed in https://github.com/Mailu/Mailu/blob/e50f6c58c0f81f639295033463584dab92f16808/core/admin/mailu/internal/views/postfix.py#L32
The return value needs to be encoded as @KingOfDog suggested.
I can prepare a PR, but I want to check the use of resolve_destination first, as it seems to return None, an address or a list of addresses - which would lead to nasty errors when joining with "," as the postfix_alias_map function does.
https://github.com/Mailu/Mailu/blob/e50f6c58c0f81f639295033463584dab92f16808/core/admin/mailu/models.py#L436
That would be great. I've assigned it to you so it is clear you will work on this whenever you have time. This basically blocks people from using domains with a special character and alternative with normal notation. | 2022-07-28T14:27:40 |
|
Mailu/Mailu | 2,404 | Mailu__Mailu-2404 | [
"2402"
] | cb70f10a49f5f9c67d688a1a2eb282bd966cab4f | diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -37,7 +37,7 @@ def __init__(self,message=_('Invalid email address.')):
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| Cant setup forward to short-named email address.
### Environment
- docker-compose
### Versions
```
docker ps -a | grep mailu/admin
f14b60868ade mailu/admin:1.9 "/bin/sh -c /start.py" 7 weeks ago Up 7 weeks (healthy) 80/tcp mailu-admin-1
```
## Description
User have email-address like [email protected] . I cant setup forward to this email via WebUI or CLI
## Replication Steps
Setup forward for user to email like [email protected]
| I can replicate it as well on the demo site test.mailu.io.
The regex for checking the email addresses for this field must be invalid.
https://github.com/Mailu/Mailu/blob/cb70f10a49f5f9c67d688a1a2eb282bd966cab4f/core/admin/mailu/ui/forms.py#L40
@Diman0 You can change the regex to the following to allow one-letter domains:
re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]+\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]+\.)*([a-z]{2,}))*$')
Technically a one-letter TLD it's possible but AFAIK there is none as of today. The regex could also be refined further to really only match valid domains.
Alex
> @Diman0 You can change the regex to the following to allow one-letter domains:
>
> re.compile(r'^([_a-z0-9-]+)(.[_a-z0-9-]+)_@([a-z0-9-]+.)_([a-z]{2,})(,([_a-z0-9-]+)(.[_a-z0-9-]+)_@([a-z0-9-]+.)_([a-z]{2,}))*$')
>
> Technically a one-letter TLD it's possible but AFAIK there is none as of today. The regex could also be refined further to really only match valid domains.
>
> Alex
```
dig m.ht mx +short
10 mail.m.ht.
```
> ^([_a-z0-9-]+)(.[_a-z0-9-]+)_@([a-z0-9-]+.)_([a-z]{2,})(,([_a-z0-9-]+)(.[_a-z0-9-]+)_@([a-z0-9-]+.)_([a-z]{2,}))*$
^([_a-z0-9-]+)(.[_a-z0-9-]+)@([a-z0-9-]+.)([a-z]{2,})(,([_a-z0-9-]+)(.[_a-z0-9-]+)@([a-z0-9-]+.)([a-z]{2,}))*$
When I test your regex on regex101.com it does not work.
^([_a-z0-9-]+)(.[_a-z0-9-]+)@([a-z0-9-]+.)([a-z]{2,})(,([_a-z0-9-]+)(.[_a-z0-9-]+)@([a-z0-9-]+.)([a-z]{2,}))*$
The old regex does work on regex101.com.
This adapted regex also does work for me:
`^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$`
I can successfully test:
`[email protected]`
`[email protected],[email protected],[email protected]` | 2022-08-04T14:54:06 |
|
Mailu/Mailu | 2,437 | Mailu__Mailu-2437 | [
"2435"
] | 60f94abc94ab11e78684c144cc5c233d8956a3a1 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -98,7 +98,7 @@ class ConfigManager:
DB_TEMPLATES = {
'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
- 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
+ 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
}
def __init__(self):
| admin container on master branch fails with mysql
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [*] Check if no issue or pull-request for this already exists.
- [*] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [*] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [*] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [*] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
```
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-master}
```
## Description
The last 2-3 admin builds of the master branch fail on mysql with this error:
ModuleNotFoundError: No module named 'MySQLdb'
This is not a production environment, just wanted to report.
My mysql server is as follows, but with the admin:1.9 image the setup works properly, so it isn't a mysql issue.
mysql Ver 15.1 Distrib 10.8.4-MariaDB, for debian-linux-gnu (x86_64) using EditLine wrapper
## Replication Steps
Steps for replicating your issue
## Expected behaviour
## Logs
````markdown
```
admin_1 | Traceback (most recent call last):
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py", line 1008, in __call__
admin_1 | return self.registry[key]
admin_1 | KeyError: <greenlet.greenlet object at 0x7f231b79da90 (otid=0x7f231b623340) current active started main>
admin_1 | During handling of the above exception, another exception occurred:
admin_1 | Traceback (most recent call last):
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 2073, in wsgi_app
admin_1 | response = self.full_dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1518, in full_dispatch_request
admin_1 | rv = self.handle_user_exception(e)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1516, in full_dispatch_request
admin_1 | rv = self.dispatch_request()
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 1502, in dispatch_request
admin_1 | return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)
admin_1 | File "/app/mailu/sso/views/base.py", line 49, in login
admin_1 | return flask.render_template('login.html', form=form, fields=fields)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/templating.py", line 146, in render_template
admin_1 | ctx.app.update_template_context(context)
admin_1 | File "/usr/lib/python3.9/site-packages/flask/app.py", line 756, in update_template_context
admin_1 | context.update(func())
admin_1 | File "/app/mailu/__init__.py", line 54, in inject_defaults
admin_1 | signup_domains = models.Domain.query.filter_by(signup_enabled=True).all()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 552, in __get__
admin_1 | return type.query_class(mapper, session=self.sa.session())
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/scoping.py", line 47, in __call__
admin_1 | sess = self.registry()
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/_collections.py", line 1010, in __call__
admin_1 | return self.registry.setdefault(key, self.createfunc())
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/orm/session.py", line 4172, in __call__
admin_1 | return self.class_(**local_kw)
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 176, in __init__
admin_1 | bind = options.pop('bind', None) or db.engine
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 998, in engine
admin_1 | return self.get_engine()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 1017, in get_engine
admin_1 | return connector.get_engine()
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 594, in get_engine
admin_1 | self._engine = rv = self._sa.create_engine(sa_url, options)
admin_1 | File "/usr/lib/python3.9/site-packages/flask_sqlalchemy/__init__.py", line 1027, in create_engine
admin_1 | return sqlalchemy.create_engine(sa_url, **engine_opts)
admin_1 | File "<string>", line 2, in create_engine
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/util/deprecations.py", line 298, in warned
admin_1 | return fn(*args, **kwargs)
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/engine/create.py", line 560, in create_engine
admin_1 | dbapi = dialect_cls.dbapi(**dbapi_args)
admin_1 | File "/usr/lib/python3.9/site-packages/sqlalchemy/dialects/mysql/mysqldb.py", line 167, in dbapi
admin_1 | return __import__("MySQLdb")
admin_1 | ModuleNotFoundError: No module named 'MySQLdb'
```
````
| when i use v1.9.34, i meet the same error.
i downgrade to v1.9.26 no error.
ACK. https://github.com/Mailu/Mailu/pull/2422/files is what broke it. | 2022-08-30T12:29:24 |
|
Mailu/Mailu | 2,444 | Mailu__Mailu-2444 | [
"1258"
] | cdb4833e77e64362781656aabc54b29864efe753 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -87,8 +87,7 @@
'HOST_REDIS': 'redis',
'HOST_FRONT': 'front',
'SUBNET': '192.168.203.0/24',
- 'SUBNET6': None,
- 'POD_ADDRESS_RANGE': None
+ 'SUBNET6': None
}
class ConfigManager:
diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -13,8 +13,6 @@ def dovecot_passdb_dict(user_email):
allow_nets.append(app.config["SUBNET"])
if app.config["SUBNET6"]:
allow_nets.append(app.config["SUBNET6"])
- if app.config["POD_ADDRESS_RANGE"]:
- allow_nets.append(app.config["POD_ADDRESS_RANGE"])
return flask.jsonify({
"password": None,
"nopassword": "Y",
| Deprecate (and remove) POD_ADDRESS_RANGE
As discussed in #1209, POD_ADDRESS_RANGE should be removed and SUBNET should be used instead.
Tasks:
- remove all occurences of POD_ADDRESS_RANGE from containers
- update the docs
- write a changelog (breaking change!)
| Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out! | 2022-09-01T13:18:32 |
|
Mailu/Mailu | 2,450 | Mailu__Mailu-2450 | [
"1945"
] | 48e1e91a2cf0e57c0fa971fda464dc145a271da6 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -74,6 +74,7 @@
'PERMANENT_SESSION_LIFETIME': 30*24*3600,
'SESSION_COOKIE_SECURE': True,
'CREDENTIAL_ROUNDS': 12,
+ 'TLS_PERMISSIVE': True,
'TZ': 'Etc/UTC',
# Host settings
'HOST_IMAP': 'imap',
diff --git a/core/nginx/config.py b/core/nginx/config.py
--- a/core/nginx/config.py
+++ b/core/nginx/config.py
@@ -9,6 +9,8 @@
log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
+args['TLS_PERMISSIVE'] = str(args.get('TLS_PERMISSIVE')).lower() not in ('false', 'no')
+
# Get the first DNS server
with open("/etc/resolv.conf") as handle:
content = handle.read().split()
| Feature request: Overwriting the TLS configuration on port 25
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.8
## Description
Feature request: Overwriting TLS settings for smtp.
while overwriting the TLS settings for https works perfectly using a volume mapping to /conf/tls.conf it doesn't work for port 25.
Somewhat logical because the TLS settings are hardcoded in the codefragment from [nginx.conf](https://github.com/Mailu/Mailu/blob/master/core/nginx/conf/nginx.conf)
```
# SMTP is always enabled, to avoid losing emails when TLS is failing
server {
listen 25;
listen [::]:25;
{% if TLS and not TLS_ERROR %}
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA;
ssl_prefer_server_ciphers on;
starttls on;
{% endif %}
protocol smtp;
smtp_auth none;
auth_http_header Auth-Port 25;
}
```
Would be nice to include the tls.conf, or better a separate smtptls.conf here too.
| What's the use-case for this? surely "any" encryption beats no encryption, no?
Best practice - not use TLS 1.0 and 1.1 and specific cipher suites. The guidance for web and for mail doesnβt differ really, does it?
See e.g. the Internet.nl <http://internet.nl/> platform that issues warnings for supporting TLS1 and TLS1.1
βOlaf
> Op 24 aug. 2021, om 14:46 heeft Florent Daigniere ***@***.*** ***@***.***>> het volgende geschreven:
>
>
> What's the use-case for this? surely "any" encryption beats no encryption, no?
>
> β
> You are receiving this because you authored the thread.
> Reply to this email directly, view it on GitHub <https://github.com/Mailu/Mailu/issues/1945#issuecomment-904608675>, or unsubscribe <https://github.com/notifications/unsubscribe-auth/AHFHGTFPONXWGAVRDXJQ6IDT6OICFANCNFSM5CWEXRMQ>.
>
The current config is what's recommended as the "old" profile for maximum compatibility:
https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=old&openssl=1.1.1d&guideline=5.6
There is "ssl_prefer_server_ciphers on;" so any client that can do "better" will... "any" encryption beats no encryption
As nextgens explained this is by design. This will not change in the foreseeable future. We prefer "any" encryption over no encryption. Unfortunately there exist still many email servers that do not support TLS 1.2+. | 2022-09-12T10:58:54 |
|
Mailu/Mailu | 2,458 | Mailu__Mailu-2458 | [
"1363"
] | ba27cdb3a8007b718a822c09cea7a2e655cb80b5 | diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -33,6 +33,7 @@ def dovecot_quota(ns, user_email):
user = models.User.query.get(user_email) or flask.abort(404)
if ns == "storage":
user.quota_bytes_used = flask.request.get_json()
+ user.dont_change_updated_at()
models.db.session.commit()
return flask.jsonify(None)
diff --git a/core/admin/mailu/internal/views/fetch.py b/core/admin/mailu/internal/views/fetch.py
--- a/core/admin/mailu/internal/views/fetch.py
+++ b/core/admin/mailu/internal/views/fetch.py
@@ -27,6 +27,7 @@ def fetch_done(fetch_id):
fetch = models.Fetch.query.get(fetch_id) or flask.abort(404)
fetch.last_check = datetime.datetime.now()
fetch.error_message = str(flask.request.get_json())
+ fetch.dont_change_updated_at()
models.db.session.add(fetch)
models.db.session.commit()
return ""
diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -25,6 +25,7 @@
from sqlalchemy.ext import declarative
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
+from sqlalchemy.orm.attributes import flag_modified
from werkzeug.utils import cached_property
from mailu import dkim, utils
@@ -154,6 +155,10 @@ def __hash__(self):
self.__hashed = id(self) if primary is None else hash(primary)
return self.__hashed
+ def dont_change_updated_at(self):
+ """ Mark updated_at as modified, but keep the old date when updating the model"""
+ flag_modified(self, 'updated_at')
+
# Many-to-many association table for domain managers
managers = db.Table('manager', Base.metadata,
| Strange behavior of user updated_at field
I have mailboxes with updated_at date of today. After some testing i found that this field is updated once a mailbox is receiving a new mail. Can somebody confirm this?
I use the default sqlite backend.
EDIT:
seems like /internal/dovecot/quota/* endpoint is updating the user entry and refreshing the changed_at date for that row.
| This is long time regression. I will update recommendations in the security adviso and we need to check how to avoid this using sqlalchemy.
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
Why stale-bot is targeting this? This is a bug and should be resolved since it makes the updated_at field completly useless. Past has shown that it might be security related to know when a account has modified. Just closing does not fix the issue. | 2022-09-26T08:05:41 |
|
Mailu/Mailu | 2,468 | Mailu__Mailu-2468 | [
"2467"
] | 1cdc4e76b405ea13eaba5c4334b1de3f667a5634 | diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -15,6 +15,7 @@
"--cert-name", "mailu",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
+ "--allow-subset-of-names",
"--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
"--post-hook", "/config.py"
@@ -28,6 +29,7 @@
"--cert-name", "mailu-ecdsa",
"--preferred-challenges", "http", "--http-01-port", "8008",
"--keep-until-expiring",
+ "--allow-subset-of-names",
"--key-type", "ecdsa",
"--renew-with-new-domains",
"--config-dir", "/certs/letsencrypt",
| SSL fails for all domains if a single domain fails LetsEncrypt challenge
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html).
- [X] You understand `Mailu` is made by volunteers in their **free time** β be conscise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple.
-
## Environment & Versions
### Environment
- [X] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
```
1.9
```
## Description
I used [mailu setup utility](https://setup.mailu.io/1.9/) to create a docker-compose.yml with multiple email domains. Turns out not all domains were already pointing to the server IP, so some challenges failed. This leads to nginx closing port 443. So even the main domain is not reachable via SSL.
After removing the non-working domains the cert is created successfully and SSL is working.
## Replication Steps
Create a new mailu setup, add multiple domains of which some are not pointing to the server.
## Expected behaviour
There should be a certificate for the domains that are reachable and nginx should make those accessible with SSL on port 443.
## Logs
```
2022-10-05T19:47:24.203180336Z Domain: email.example.com
2022-10-05T19:47:24.203182530Z Type: dns
2022-10-05T19:47:24.203184754Z Detail: no valid A records found for email.example.com; no valid AAAA records found for email.example.com
2022-10-05T19:47:24.203187149Z
2022-10-05T19:47:24.203189393Z Hint: The Certificate Authority couldn't exterally verify that the standalone plugin completed the required http-01 challenges. Ensure the plugin is configured correctly and that the changes it makes are accessible from the internet.
2022-10-05T19:47:24.203192008Z
2022-10-05T19:47:24.702017069Z 2022/10/05 21:47:24 [notice] 1#1: signal 1 (SIGHUP) received from 22, reconfiguring
2022-10-05T19:47:24.702118810Z 2022/10/05 21:47:24 [notice] 1#1: reconfiguring
2022-10-05T19:47:24.705542967Z 2022/10/05 21:47:24 [warn] 1#1: conflicting server name "" on 0.0.0.0:80, ignored
2022-10-05T19:47:24.705911789Z 2022/10/05 21:47:24 [notice] 1#1: using the "epoll" event method
2022-10-05T19:47:24.706081756Z 2022/10/05 21:47:24 [notice] 1#1: start worker processes
2022-10-05T19:47:24.706331032Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 23
2022-10-05T19:47:24.706639951Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 24
2022-10-05T19:47:24.706852248Z 2022/10/05 21:47:24 [notice] 1#1: start worker process 25
2022-10-05T19:47:24.730032307Z Hook 'post-hook' ran with output:
2022-10-05T19:47:24.730052144Z Missing cert or key file, disabling TLS
2022-10-05T19:47:24.730291842Z Hook 'post-hook' ran with error output:
2022-10-05T19:47:24.730302613Z nginx: [warn] conflicting server name "" on 0.0.0.0:80, ignored
2022-10-05T19:47:24.732101009Z Some challenges have failed.
2022-10-05T19:47:24.732342892Z Ask for help or search for solutions at https://community.letsencrypt.org. See the logfile /var/log/letsencrypt/letsencrypt.log or re-run Certbot with -v for more details.
```
| 2022-10-08T13:35:58 |
||
Mailu/Mailu | 2,483 | Mailu__Mailu-2483 | [
"2127"
] | bbbed4d9ac8d33d45b05c064546472a7e9ca01be | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -16,13 +16,14 @@
'DOMAIN_REGISTRATION': False,
'TEMPLATES_AUTO_RELOAD': True,
'MEMORY_SESSIONS': False,
+ 'FETCHMAIL_ENABLED': False,
# Database settings
'DB_FLAVOR': None,
'DB_USER': 'mailu',
'DB_PW': None,
'DB_HOST': 'database',
'DB_NAME': 'mailu',
- 'SQLITE_DATABASE_FILE':'data/main.db',
+ 'SQLITE_DATABASE_FILE': 'data/main.db',
'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
# Statistics management
@@ -59,7 +60,7 @@
# Web settings
'SITENAME': 'Mailu',
'WEBSITE': 'https://mailu.io',
- 'ADMIN' : 'none',
+ 'ADMIN': 'none',
'WEB_ADMIN': '/admin',
'WEB_WEBMAIL': '/webmail',
'WEBMAIL': 'none',
diff --git a/core/admin/mailu/ui/views/fetches.py b/core/admin/mailu/ui/views/fetches.py
--- a/core/admin/mailu/ui/views/fetches.py
+++ b/core/admin/mailu/ui/views/fetches.py
@@ -1,5 +1,6 @@
from mailu import models
from mailu.ui import ui, forms, access
+from flask import current_app as app
import flask
import flask_login
@@ -10,6 +11,8 @@
@ui.route('/fetch/list/<path:user_email>', methods=['GET'])
@access.owner(models.User, 'user_email')
def fetch_list(user_email):
+ if not app.config['FETCHMAIL_ENABLED']:
+ flask.abort(404)
user_email = user_email or flask_login.current_user.email
user = models.User.query.get(user_email) or flask.abort(404)
return flask.render_template('fetch/list.html', user=user)
@@ -19,6 +22,8 @@ def fetch_list(user_email):
@ui.route('/fetch/create/<path:user_email>', methods=['GET', 'POST'])
@access.owner(models.User, 'user_email')
def fetch_create(user_email):
+ if not app.config['FETCHMAIL_ENABLED']:
+ flask.abort(404)
user_email = user_email or flask_login.current_user.email
user = models.User.query.get(user_email) or flask.abort(404)
form = forms.FetchForm()
@@ -37,6 +42,8 @@ def fetch_create(user_email):
@ui.route('/fetch/edit/<fetch_id>', methods=['GET', 'POST'])
@access.owner(models.Fetch, 'fetch_id')
def fetch_edit(fetch_id):
+ if not app.config['FETCHMAIL_ENABLED']:
+ flask.abort(404)
fetch = models.Fetch.query.get(fetch_id) or flask.abort(404)
form = forms.FetchForm(obj=fetch)
if form.validate_on_submit():
@@ -55,6 +62,8 @@ def fetch_edit(fetch_id):
@access.confirmation_required("delete a fetched account")
@access.owner(models.Fetch, 'fetch_id')
def fetch_delete(fetch_id):
+ if not app.config['FETCHMAIL_ENABLED']:
+ flask.abort(404)
fetch = models.Fetch.query.get(fetch_id) or flask.abort(404)
user = fetch.user
models.db.session.delete(fetch)
diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -95,6 +95,13 @@ def run(debug):
if __name__ == "__main__":
while True:
- time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
+ delay = int(os.environ.get("FETCHMAIL_DELAY", 60))
+ print("Sleeping for {} seconds".format(delay))
+ time.sleep(delay)
+
+ if not os.environ.get("FETCHMAIL_ENABLED", 'True') in ('True', 'true'):
+ print("Fetchmail disabled, skipping...")
+ continue
+
run(os.environ.get("DEBUG", None) == "True")
sys.stdout.flush()
| Suggestion: Only show "Fetched accounts" menu entry if fetchmail is enabled
I think it would be neat if the sidebar entry `Fetched accounts` in the admin portal is only displayed when fetchmail is really enabled. When fetchmail is disabled, the tab may cause confusion for users, as it has no functionality in this case.
| I noticed the same. Good idea to disable or remove the `Fetched accounts` menu in admin when the container is not running or reachable, because the `fetchmail` container is optional to use. | 2022-10-20T11:47:59 |
|
Mailu/Mailu | 2,498 | Mailu__Mailu-2498 | [
"2499"
] | 62c919da09dbb57b2b8ee9c0af574aa389c23aa4 | diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -5,6 +5,7 @@
import flask
import socket
import os
+import sqlalchemy.exc
@internal.route("/dovecot/passdb/<path:user_email>")
def dovecot_passdb_dict(user_email):
@@ -19,12 +20,20 @@ def dovecot_passdb_dict(user_email):
"allow_nets": ",".join(allow_nets)
})
[email protected]("/dovecot/userdb/")
+def dovecot_userdb_dict_list():
+ return flask.jsonify([
+ user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all()
+ ])
@internal.route("/dovecot/userdb/<path:user_email>")
def dovecot_userdb_dict(user_email):
- user = models.User.query.get(user_email) or flask.abort(404)
+ try:
+ quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404)
+ except sqlalchemy.exc.StatementError as exc:
+ flask.abort(404)
return flask.jsonify({
- "quota_rule": "*:bytes={}".format(user.quota_bytes)
+ "quota_rule": f"*:bytes={quota[0]}"
})
diff --git a/core/base/libs/podop/podop/dovecot.py b/core/base/libs/podop/podop/dovecot.py
--- a/core/base/libs/podop/podop/dovecot.py
+++ b/core/base/libs/podop/podop/dovecot.py
@@ -40,6 +40,7 @@ def __init__(self, table_map):
def connection_made(self, transport):
logging.info('Connect {}'.format(transport.get_extra_info('peername')))
self.transport = transport
+ self.transport_lock = asyncio.Lock()
def data_received(self, data):
logging.debug("Received {}".format(data))
@@ -77,10 +78,11 @@ def process_hello(self, major, minor, value_type, user, dict_name):
logging.debug("Client {}.{} type {}, user {}, dict {}".format(
self.major, self.minor, self.value_type, self.user, dict_name))
- async def process_lookup(self, key, user=None):
+ async def process_lookup(self, key, user=None, is_iter=False):
""" Process a dict lookup message
"""
logging.debug("Looking up {} for {}".format(key, user))
+ orig_key = key
# Priv and shared keys are handled slighlty differently
key_type, key = key.decode("utf8").split("/", 1)
try:
@@ -93,9 +95,38 @@ async def process_lookup(self, key, user=None):
response = result
else:
response = json.dumps(result).encode("ascii")
- return self.reply(b"O", response)
+ return await (self.reply(b"O", orig_key, response) if is_iter else self.reply(b"O", response))
except KeyError:
- return self.reply(b"N")
+ return await self.reply(b"N")
+
+ async def process_iterate(self, flags, max_rows, path, user=None):
+ """ Process an iterate command
+ """
+ logging.debug("Iterate flags {} max_rows {} on {} for {}".format(flags, max_rows, path, user))
+ # Priv and shared keys are handled slighlty differently
+ key_type, key = path.decode("utf8").split("/", 1)
+ max_rows = int(max_rows.decode("utf-8"))
+ flags = int(flags.decode("utf-8"))
+ if flags != 0: # not implemented
+ return await self.reply(b"F")
+ rows = []
+ try:
+ result = await self.dict.iter(key)
+ logging.debug("Found {} entries: {}".format(len(result), result))
+ for i,k in enumerate(result):
+ if max_rows > 0 and i >= max_rows:
+ break
+ rows.append(self.process_lookup((path.decode("utf8")+k).encode("utf8"), user, is_iter=True))
+ await asyncio.gather(*rows)
+ async with self.transport_lock:
+ self.transport.write(b"\n") # ITER_FINISHED
+ return
+ except KeyError:
+ return await self.reply(b"F")
+ except Exception as e:
+ for task in rows:
+ task.cancel()
+ raise e
def process_begin(self, transaction_id, user=None):
""" Process a dict begin message
@@ -124,13 +155,14 @@ async def process_commit(self, transaction_id):
# Remove stored transaction
del self.transactions[transaction_id]
del self.transactions_user[transaction_id]
- return self.reply(b"O", transaction_id)
+ return await self.reply(b"O", transaction_id)
- def reply(self, command, *args):
- logging.debug("Replying {} with {}".format(command, args))
- self.transport.write(command)
- self.transport.write(b"\t".join(map(tabescape, args)))
- self.transport.write(b"\n")
+ async def reply(self, command, *args):
+ async with self.transport_lock:
+ logging.debug("Replying {} with {}".format(command, args))
+ self.transport.write(command)
+ self.transport.write(b"\t".join(map(tabescape, args)))
+ self.transport.write(b"\n")
@classmethod
def factory(cls, table_map):
@@ -141,6 +173,7 @@ def factory(cls, table_map):
COMMANDS = {
ord("H"): process_hello,
ord("L"): process_lookup,
+ ord("I"): process_iterate,
ord("B"): process_begin,
ord("C"): process_commit,
ord("S"): process_set
| Doveadm Cannot List Users
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `1.9`
## Description
`doveadm search -A` doesn't work
## Replication Steps
Deploy a fresh new server, add a few users and execute `doveadm search -A ALL` in `imap` container.
## Observed behaviour
Nothing showed.
## Expected behaviour
Should give a list of existing mails.
## Extra Info
If I run `doveadm -Dv search -u [email protected] ALL`, it succeeded.
## Logs
Result of `doveadm -Dv search -A ALL`
```
Oct 31 15:58:26 Debug: Loading modules from directory: /usr/lib/dovecot
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/lib10_quota_plugin.so
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/lib20_fts_plugin.so
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/lib20_quota_clone_plugin.so
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/lib21_fts_xapian_plugin.so
Oct 31 15:58:26 Debug: Loading modules from directory: /usr/lib/dovecot/doveadm
Oct 31 15:58:26 Debug: Skipping module doveadm_acl_plugin, because dlopen() failed: Error relocating /usr/lib/dovecot/doveadm/lib10_doveadm_acl_plugin.so: acl_rights_update_import: symbol not found (this is usually intentional, so just ignore this message)
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib10_doveadm_quota_plugin.so
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib10_doveadm_sieve_plugin.so
Oct 31 15:58:26 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib20_doveadm_fts_plugin.so
Oct 31 15:58:26 Debug: Skipping module doveadm_mail_crypt_plugin, because dlopen() failed: Error relocating /usr/lib/dovecot/doveadm/libdoveadm_mail_crypt_plugin.so: mail_crypt_box_get_public_key: symbol not found (this is usually intentional, so just ignore this message)
Oct 31 15:58:26 Debug: auth-master: userdb list: Started listing users (user_mask=)
Oct 31 15:58:26 Debug: auth-master: conn unix:/run/dovecot/auth-userdb: Connecting
Oct 31 15:58:26 Debug: auth-master: conn unix:/run/dovecot/auth-userdb (pid=1,uid=0): Client connected (fd=9)
Oct 31 15:58:26 Debug: auth-master: userdb list: Finished listing users
Oct 31 15:58:26 Debug: auth-master: conn unix:/run/dovecot/auth-userdb (pid=1,uid=0): Disconnected: Connection closed (fd=9)
```
Result of `doveadm -Dv search -u [email protected] ALL`
```
Oct 31 16:04:46 Debug: Loading modules from directory: /usr/lib/dovecot
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/lib10_quota_plugin.so
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/lib20_fts_plugin.so
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/lib20_quota_clone_plugin.so
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/lib21_fts_xapian_plugin.so
Oct 31 16:04:46 Debug: Loading modules from directory: /usr/lib/dovecot/doveadm
Oct 31 16:04:46 Debug: Skipping module doveadm_acl_plugin, because dlopen() failed: Error relocating /usr/lib/dovecot/doveadm/lib10_doveadm_acl_plugin.so: acl_rights_update_import: symbol not found (this is usually intentional, so just ignore this message)
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib10_doveadm_quota_plugin.so
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib10_doveadm_sieve_plugin.so
Oct 31 16:04:46 Debug: Module loaded: /usr/lib/dovecot/doveadm/lib20_doveadm_fts_plugin.so
Oct 31 16:04:46 Debug: Skipping module doveadm_mail_crypt_plugin, because dlopen() failed: Error relocating /usr/lib/dovecot/doveadm/libdoveadm_mail_crypt_plugin.so: mail_crypt_box_get_public_key: symbol not found (this is usually intentional, so just ignore this message)
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: auth-master: userdb lookup([email protected]): Started userdb lookup
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: auth-master: conn unix:/run/dovecot/auth-userdb: Connecting
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: auth-master: conn unix:/run/dovecot/auth-userdb (pid=1,uid=0): Client connected (fd=9)
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: auth-master: userdb lookup([email protected]): auth USER input: [email protected] quota_rule=*:bytes=1000000000
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: auth-master: userdb lookup([email protected]): Finished userdb lookup ([email protected] quota_rule=*:bytes=1000000000)
Oct 31 16:04:46 doveadm([email protected])<175><>: Debug: Added userdb setting: plugin/quota_rule=*:bytes=1000000000
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: Effective uid=8, gid=12, home=/mail/[email protected]
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: Quota root: name=User quota backend=count args=
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: Quota rule: root=User quota mailbox=* bytes=1000000000 messages=0
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: Quota grace: root=User quota bytes=100000000 (10%)
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: Namespace inbox: type=private, prefix=, sep=, inbox=yes, hidden=no, list=yes, subscriptions=yes location=maildir:/mail/[email protected]
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: maildir++: root=/mail/[email protected], index=, indexpvt=, control=, inbox=/mail/[email protected], alt=
Oct 31 16:04:46 doveadm([email protected])<175><nWh4MR7yX2OvAAAAU8qUbw>: Debug: quota: quota_over_flag check: quota_over_script unset - skipping
Oct 31 16:04:46 doveadm([email protected]): Debug: Mailbox Trash: Mailbox opened because: search
Oct 31 16:04:46 doveadm([email protected]): Debug: Mailbox Junk: Mailbox opened because: search
Oct 31 16:04:46 doveadm([email protected]): Debug: Mailbox Sent: Mailbox opened because: search
Oct 31 16:04:46 doveadm([email protected]): Debug: Mailbox Drafts: Mailbox opened because: search
Oct 31 16:04:46 doveadm([email protected]): Debug: Mailbox INBOX: Mailbox opened because: search
Oct 31 16:04:46 doveadm([email protected]): Debug: dict(proxy)<[email protected]>: Waiting for dict to finish pending operations
Oct 31 16:04:46 doveadm([email protected]): Debug: dict(proxy)<[email protected]>: Waiting for dict to finish pending operations
8e5bb0397c244b630f351500ed1cffc7 1
8e5bb0397c244b630f351500ed1cffc7 2
8e5bb0397c244b630f351500ed1cffc7 3
8e5bb0397c244b630f351500ed1cffc7 4
8e5bb0397c244b630f351500ed1cffc7 5
...
Oct 31 16:04:46 doveadm(175): Debug: auth-master: conn unix:/run/dovecot/auth-userdb (pid=1,uid=0): Disconnected: Connection closed (fd=9)
```
| 2022-10-30T19:43:26 |
||
Mailu/Mailu | 2,513 | Mailu__Mailu-2513 | [
"2512"
] | 323f0a4e70059c577d94b1c879cf0c5c766f3973 | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -546,8 +546,8 @@ def reply_active(self):
now = date.today()
return (
self.reply_enabled and
- self.reply_startdate < now and
- self.reply_enddate > now
+ self.reply_startdate <= now and
+ self.reply_enddate >= now
)
@property
| Include start and end dates in the auto-reply period
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `1.9`
## Description
The administration interface provides an [auto-reply](https://mailu.io/master/webadministration.html#auto-reply) page where automatic replies can be configured with start and end dates. Unfortunately both the start date and the end date are not included in the auto-reply period (i.e. no auto replies are being sent on these two days). To work around this issue you have to insert the day before your vacation as start date and the day after your vacation as end date. This is not intuitive.
## Replication Steps
Activate the auto-reply feature, insert subject and body text and the current date as "start of vacation" ("end of vacation" has to be a date in the future). Then send an email from another email account (external) to your email address hosted on Mailu.
## Observed behaviour
No auto reply message received by the sender.
## Expected behaviour
Auto reply message received by the sender.
To verify this behaviour you can add yesterday's date as "start of vacation" date and send another email to your Mailu account ... the sender will receive an auto reply message ...
The same applies to the "end of vacation" date.
## Logs
n/a
| 2022-11-02T16:52:26 |
||
Mailu/Mailu | 2,524 | Mailu__Mailu-2524 | [
"1521"
] | 745c211c4abb17a26930902e8190f19d85b84352 | diff --git a/webmails/roundcube/start.py b/webmails/roundcube/start.py
--- a/webmails/roundcube/start.py
+++ b/webmails/roundcube/start.py
@@ -1,12 +1,13 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import os
import logging
import sys
-from socrate import conf
import subprocess
import hmac
+from socrate import conf
+
env = os.environ
logging.basicConfig(stream=sys.stderr, level=env.get("LOG_LEVEL", "WARNING"))
@@ -51,7 +52,7 @@
# roundcube plugins
# (using "dict" because it is ordered and "set" is not)
-plugins = dict((p, None) for p in env.get("ROUNDCUBE_PLUGINS", "").replace(" ", "").split(",") if p and os.path.isdir(os.path.join("/var/www/html/plugins", p)))
+plugins = dict((p, None) for p in env.get("ROUNDCUBE_PLUGINS", "").replace(" ", "").split(",") if p and os.path.isdir(os.path.join("/var/www/webmail/plugins", p)))
if plugins:
plugins["mailu"] = None
else:
@@ -66,18 +67,15 @@
context["SESSION_TIMEOUT_MINUTES"] = max(int(env.get("SESSION_TIMEOUT", "3600")) // 60, 1)
# create config files
-conf.jinja("/php.ini", context, "/usr/local/etc/php/conf.d/roundcube.ini")
-conf.jinja("/config.inc.php", context, "/var/www/html/config/config.inc.php")
+conf.jinja("/conf/php.ini", context, "/etc/php81/php.ini")
+conf.jinja("/conf/config.inc.php", context, "/var/www/webmail/config/config.inc.php")
# create dirs
os.system("mkdir -p /data/gpg")
-# disable access log for VirtualHosts that don't define their own logfile
-os.system("a2disconf other-vhosts-access-log")
-
print("Initializing database")
try:
- result = subprocess.check_output(["/var/www/html/bin/initdb.sh", "--dir", "/var/www/html/SQL"],
+ result = subprocess.check_output(["/var/www/webmail/bin/initdb.sh", "--dir", "/var/www/webmail/SQL"],
stderr=subprocess.STDOUT)
print(result.decode())
except subprocess.CalledProcessError as exc:
@@ -90,22 +88,29 @@
print("Upgrading database")
try:
- subprocess.check_call(["/var/www/html/bin/update.sh", "--version=?", "-y"], stderr=subprocess.STDOUT)
+ subprocess.check_call(["/var/www/webmail/bin/update.sh", "--version=?", "-y"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
exit(4)
else:
print("Cleaning database")
try:
- subprocess.check_call(["/var/www/html/bin/cleandb.sh"], stderr=subprocess.STDOUT)
+ subprocess.check_call(["/var/www/webmail/bin/cleandb.sh"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
exit(5)
# setup permissions
-os.system("chown -R www-data:www-data /data")
+os.system("chown -R nginx:nginx /data")
+os.system("chmod -R a+rX /var/www/webmail/")
+
+# Configure nginx
+conf.jinja("/conf/nginx-roundcube.conf", context, "/etc/nginx/http.d/roundcube.conf")
+if os.path.exists("/var/run/nginx.pid"):
+ os.system("nginx -s reload")
# clean env
[env.pop(key, None) for key in env.keys() if key == "SECRET_KEY" or key.startswith("ROUNDCUBE_")]
-# run apache
-os.execve("/usr/local/bin/apache2-foreground", ["apache2-foreground"], env)
+# run nginx
+os.system("php-fpm81")
+os.execv("/usr/sbin/nginx", ["nginx", "-g", "daemon off;"])
diff --git a/webmails/snappymail/config.py b/webmails/snappymail/config.py
--- a/webmails/snappymail/config.py
+++ b/webmails/snappymail/config.py
@@ -1,8 +1,9 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import os
import logging as log
import sys
+
from socrate import system, conf
args = os.environ.copy()
diff --git a/webmails/snappymail/start.py b/webmails/snappymail/start.py
--- a/webmails/snappymail/start.py
+++ b/webmails/snappymail/start.py
@@ -1,10 +1,11 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import os
import shutil
import logging as log
import sys
import subprocess
+
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
@@ -22,9 +23,9 @@
conf.jinja("/defaults/default.ini", os.environ, "/data/_data_/_default_/domains/default.ini")
conf.jinja("/defaults/application.ini", os.environ, "/data/_data_/_default_/configs/application.ini")
-conf.jinja("/defaults/php.ini", os.environ, "/etc/php7/php.ini")
+conf.jinja("/defaults/php.ini", os.environ, "/etc/php81/php.ini")
# Start the fastcgi process manager now that config files have been adjusted
-os.system("php-fpm7")
+os.system("php-fpm81")
os.system("chown -R nginx:nginx /data")
os.system("chmod -R a+rX /var/www/webmail/")
| diff --git a/tests/build.hcl b/tests/build.hcl
--- a/tests/build.hcl
+++ b/tests/build.hcl
@@ -174,12 +174,18 @@ target "smtp" {
target "snappymail" {
inherits = ["defaults"]
context = "webmails/snappymail/"
+ contexts = {
+ base = "target:base"
+ }
tags = tag("snappymail")
}
target "roundcube" {
inherits = ["defaults"]
context = "webmails/roundcube/"
+ contexts = {
+ base = "target:base"
+ }
tags = tag("roundcube")
}
| Build webmails on php:fpm-alpine
Mailu's webmails are both built on debian/apache/php.
AFAIK all other mailu components are based on alpine, and we already have an nginx frontend.
IMHO it would make sense to rebase webmails on php:fpm-alpine and use the same nginx frontend for the static parts of the webmails.
If we want independent webmail containers (which is also nice), we should at least build the images with alpine/nginx/php-fpm built-in.
| Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
Using fpm and serving the static files via the existing would require sharing the files between both containers either via volumes (and subsequently some upgrade logic) or by putting all webmail files also in the nginx image.
I think your second proposal (completely independent containers) makes more sense, which is probably why it's currently built that way.
For using alpine we'd need to switch the base image, as docker.io/php doesn't provide tags with both alpine base and an integrated web server as far as I can see.
I don't really see a benefit in switching to alpine just for the sake of it, considering we'd need different base images and thus would have to switch away from the official Docker image for php.
Hi There,
we see this issue had only little attention π. As much as it pains us: In order to get the current issues a bit more manageable for us, we decided to close it. ππ
We hope that no one feels offended by doing so. Should the issue get really pressing in the future, please feel free to re-open it.
Thank you for your patience and understanding, π
- Your Mailu Team
I'd like to re-open this.
Here the image of the webmail is over 600MiB, fresh start with no user it's 70MiB of RAM... we can do better.
One good reason to switch is to limit the number of technologies (and configuration syntaxes) one has to be familiar with. If front is nginx, whatever is behind should either have very little configuration (FPM) or be the same (another nginx instance)
There are a couple of possibilities:
- Use php:fpm-alpine and use the same nginx frontend (front image) for the static parts of the webmails.
- Use php:fpm-alpine and also use this image for serving static files
For roundcube we could chose to base our image on the official roundcube php-fpm-alpine image.
https://github.com/roundcube/roundcubemail-docker
It is already has the functionality to set via env variable what plugins you want to run. A much requested feature for mailu roundcube.
For rainloop I could not find any official image. But there is a dockerfile on the rainloop repo we could use as a basis.
https://github.com/RainLoop/rainloop-webmail/tree/master/.docker/php
| 2022-11-10T15:53:45 |
Mailu/Mailu | 2,526 | Mailu__Mailu-2526 | [
"2250",
"2250",
"948"
] | 8a90f83bd039b2c24fc892ef03c93439caed1496 | diff --git a/webmails/snappymail/config.py b/webmails/snappymail/config.py
deleted file mode 100755
--- a/webmails/snappymail/config.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import logging as log
-import sys
-
-from socrate import system, conf
-
-args = os.environ.copy()
-
-log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
-
-# Build final configuration paths
-conf.jinja("/config/nginx-snappymail.conf", args, "/etc/nginx/http.d/snappymail.conf")
-if os.path.exists("/var/run/nginx.pid"):
- os.system("nginx -s reload")
diff --git a/webmails/snappymail/start.py b/webmails/snappymail/start.py
deleted file mode 100755
--- a/webmails/snappymail/start.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import shutil
-import logging as log
-import sys
-import subprocess
-
-from socrate import system, conf
-
-log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
-
-# Actual startup script
-os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front"))
-os.environ["IMAP_ADDRESS"] = system.resolve_address(os.environ.get("HOST_IMAP", "imap"))
-
-os.environ["MAX_FILESIZE"] = str(int(int(os.environ.get("MESSAGE_SIZE_LIMIT"))*0.66/1048576))
-
-base = "/data/_data_/_default_/"
-shutil.rmtree(base + "domains/", ignore_errors=True)
-os.makedirs(base + "domains", exist_ok=True)
-os.makedirs(base + "configs", exist_ok=True)
-
-conf.jinja("/defaults/default.ini", os.environ, "/data/_data_/_default_/domains/default.ini")
-conf.jinja("/defaults/application.ini", os.environ, "/data/_data_/_default_/configs/application.ini")
-conf.jinja("/defaults/php.ini", os.environ, "/etc/php81/php.ini")
-# Start the fastcgi process manager now that config files have been adjusted
-os.system("php-fpm81")
-
-os.system("chown -R nginx:nginx /data")
-os.system("chmod -R a+rX /var/www/webmail/")
-
-subprocess.call(["/config.py"])
-os.execv("/usr/sbin/nginx", ["nginx", "-g", "daemon off;"])
diff --git a/webmails/roundcube/start.py b/webmails/start.py
similarity index 72%
rename from webmails/roundcube/start.py
rename to webmails/start.py
--- a/webmails/roundcube/start.py
+++ b/webmails/start.py
@@ -4,9 +4,10 @@
import logging
import sys
import subprocess
+import shutil
import hmac
-from socrate import conf
+from socrate import conf, system
env = os.environ
@@ -17,6 +18,8 @@
context.update(env)
context["MAX_FILESIZE"] = str(int(int(env.get("MESSAGE_SIZE_LIMIT", "50000000")) * 0.66 / 1048576))
+context["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
+context["IMAP_ADDRESS"] = system.get_host_address_from_environment("IMAP", "imap")
db_flavor = env.get("ROUNDCUBE_DB_FLAVOR", "sqlite")
if db_flavor == "sqlite":
@@ -52,7 +55,7 @@
# roundcube plugins
# (using "dict" because it is ordered and "set" is not)
-plugins = dict((p, None) for p in env.get("ROUNDCUBE_PLUGINS", "").replace(" ", "").split(",") if p and os.path.isdir(os.path.join("/var/www/webmail/plugins", p)))
+plugins = dict((p, None) for p in env.get("ROUNDCUBE_PLUGINS", "").replace(" ", "").split(",") if p and os.path.isdir(os.path.join("/var/www/roundcube/plugins", p)))
if plugins:
plugins["mailu"] = None
else:
@@ -67,15 +70,14 @@
context["SESSION_TIMEOUT_MINUTES"] = max(int(env.get("SESSION_TIMEOUT", "3600")) // 60, 1)
# create config files
-conf.jinja("/conf/php.ini", context, "/etc/php81/php.ini")
-conf.jinja("/conf/config.inc.php", context, "/var/www/webmail/config/config.inc.php")
+conf.jinja("/conf/config.inc.php", context, "/var/www/roundcube/config/config.inc.php")
# create dirs
os.system("mkdir -p /data/gpg")
print("Initializing database")
try:
- result = subprocess.check_output(["/var/www/webmail/bin/initdb.sh", "--dir", "/var/www/webmail/SQL"],
+ result = subprocess.check_output(["/var/www/roundcube/bin/initdb.sh", "--dir", "/var/www/roundcube/SQL"],
stderr=subprocess.STDOUT)
print(result.decode())
except subprocess.CalledProcessError as exc:
@@ -88,22 +90,30 @@
print("Upgrading database")
try:
- subprocess.check_call(["/var/www/webmail/bin/update.sh", "--version=?", "-y"], stderr=subprocess.STDOUT)
+ subprocess.check_call(["/var/www/roundcube/bin/update.sh", "--version=?", "-y"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
exit(4)
else:
print("Cleaning database")
try:
- subprocess.check_call(["/var/www/webmail/bin/cleandb.sh"], stderr=subprocess.STDOUT)
+ subprocess.check_call(["/var/www/roundcube/bin/cleandb.sh"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
exit(5)
+base = "/data/_data_/_default_/"
+shutil.rmtree(base + "domains/", ignore_errors=True)
+os.makedirs(base + "domains", exist_ok=True)
+os.makedirs(base + "configs", exist_ok=True)
+
+conf.jinja("/defaults/default.json", context, "/data/_data_/_default_/domains/default.json")
+conf.jinja("/defaults/application.ini", context, "/data/_data_/_default_/configs/application.ini")
+conf.jinja("/defaults/php.ini", context, "/etc/php81/php.ini")
+
# setup permissions
-os.system("chown -R nginx:nginx /data")
-os.system("chmod -R a+rX /var/www/webmail/")
+os.system("chown -R mailu:mailu /data")
# Configure nginx
-conf.jinja("/conf/nginx-roundcube.conf", context, "/etc/nginx/http.d/roundcube.conf")
+conf.jinja("/conf/nginx-webmail.conf", context, "/etc/nginx/http.d/webmail.conf")
if os.path.exists("/var/run/nginx.pid"):
os.system("nginx -s reload")
| diff --git a/.github/workflows/build_test_deploy.yml b/.github/workflows/build_test_deploy.yml
--- a/.github/workflows/build_test_deploy.yml
+++ b/.github/workflows/build_test_deploy.yml
@@ -340,7 +340,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- target: ["core", "fetchmail", "filters", "snappymail", "roundcube", "webdav"]
+ target: ["core", "fetchmail", "filters", "webmail", "webdav"]
time: ["2"]
include:
- target: "filters"
@@ -394,7 +394,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- target: ["setup", "docs", "fetchmail", "roundcube", "admin", "traefik-certdumper", "radicale", "clamav", "rspamd", "postfix", "dovecot", "unbound", "nginx", "snappymail"]
+ target: ["setup", "docs", "fetchmail", "webmail", "admin", "traefik-certdumper", "radicale", "clamav", "rspamd", "postfix", "dovecot", "unbound", "nginx"]
steps:
- uses: actions/checkout@v3
- name: Retrieve global variables
@@ -439,7 +439,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- target: ["setup", "docs", "fetchmail", "roundcube", "admin", "traefik-certdumper", "radicale", "clamav", "rspamd", "postfix", "dovecot", "unbound", "nginx", "snappymail"]
+ target: ["setup", "docs", "fetchmail", "webmail", "admin", "traefik-certdumper", "radicale", "clamav", "rspamd", "postfix", "dovecot", "unbound", "nginx"]
steps:
- uses: actions/checkout@v3
- name: Retrieve global variables
diff --git a/tests/build.hcl b/tests/build.hcl
--- a/tests/build.hcl
+++ b/tests/build.hcl
@@ -36,8 +36,7 @@ group "default" {
"imap",
"smtp",
- "snappymail",
- "roundcube",
+ "webmail",
"antivirus",
"fetchmail",
@@ -169,24 +168,15 @@ target "smtp" {
}
# -----------------------------------------------------------------------------------------
-# Webmail images
+# Webmail image
# -----------------------------------------------------------------------------------------
-target "snappymail" {
+target "webmail" {
inherits = ["defaults"]
- context = "webmails/snappymail/"
+ context = "webmails/"
contexts = {
base = "target:base"
}
- tags = tag("snappymail")
-}
-
-target "roundcube" {
- inherits = ["defaults"]
- context = "webmails/roundcube/"
- contexts = {
- base = "target:base"
- }
- tags = tag("roundcube")
+ tags = tag("webmail")
}
# -----------------------------------------------------------------------------------------
diff --git a/tests/compose/snappymail/docker-compose.yml b/tests/compose/snappymail/docker-compose.yml
deleted file mode 100644
--- a/tests/compose/snappymail/docker-compose.yml
+++ /dev/null
@@ -1,106 +0,0 @@
-# This file is auto-generated by the Mailu configuration wizard.
-# Please read the documentation before attempting any change.
-# Generated for compose flavor
-
-version: '3.6'
-
-services:
-
- # External dependencies
- redis:
- image: redis:alpine
- restart: always
- volumes:
- - "/mailu/redis:/data"
-
- # Core services
- front:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- logging:
- driver: json-file
- ports:
- - "127.0.0.1:80:80"
- - "127.0.0.1:443:443"
- - "127.0.0.1:25:25"
- - "127.0.0.1:465:465"
- - "127.0.0.1:587:587"
- - "127.0.0.1:110:110"
- - "127.0.0.1:995:995"
- - "127.0.0.1:143:143"
- - "127.0.0.1:993:993"
- volumes:
- - "/mailu/certs:/certs"
-
- admin:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- volumes:
- - "/mailu/data:/data"
- - "/mailu/dkim:/dkim"
- depends_on:
- - redis
- - resolver
- dns:
- - 192.168.203.254
-
- imap:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- volumes:
- - "/mailu/mail:/mail"
- - "/mailu/overrides:/overrides"
- depends_on:
- - front
-
- smtp:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}postfix:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- volumes:
- - "/mailu/overrides:/overrides"
- depends_on:
- - front
-
- antispam:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- volumes:
- - "/mailu/filter:/var/lib/rspamd"
- - "/mailu/dkim:/dkim"
- - "/mailu/overrides/rspamd:/etc/rspamd/override.d"
- depends_on:
- - front
-
- # Optional services
-
- resolver:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
- env_file: mailu.env
- restart: always
- networks:
- default:
- ipv4_address: 192.168.203.254
-
- # Webmail
- webmail:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}snappymail:${MAILU_VERSION:-local}
- restart: always
- env_file: mailu.env
- volumes:
- - "/mailu/webmail:/data"
- depends_on:
- - imap
-
-
-networks:
- default:
- driver: bridge
- ipam:
- driver: default
- config:
- - subnet: 192.168.203.0/24
diff --git a/tests/compose/snappymail/mailu.env b/tests/compose/snappymail/mailu.env
deleted file mode 100644
--- a/tests/compose/snappymail/mailu.env
+++ /dev/null
@@ -1,138 +0,0 @@
-# Mailu main configuration file
-#
-# Generated for compose flavor
-#
-# This file is autogenerated by the configuration management wizard.
-# For a detailed list of configuration variables, see the documentation at
-# https://mailu.io
-
-###################################
-# Common configuration variables
-###################################
-
-# Set this to the path where Mailu data and configuration is stored
-# This variable is now set directly in `docker-compose.yml by the setup utility
-# ROOT=/mailu
-
-# Mailu version to run (1.0, 1.1, etc. or master)
-#VERSION=master
-
-# Set to a randomly generated 16 bytes string
-SECRET_KEY=V5J4SHRYVW9PZIQU
-
-# Address where listening ports should bind
-# This variables are now set directly in `docker-compose.yml by the setup utility
-# PUBLIC_IPV4= 127.0.0.1 (default: 127.0.0.1)
-# PUBLIC_IPV6= (default: ::1)
-
-# Subnet of the docker network. This should not conflict with any networks to which your system is connected. (Internal and external!)
-SUBNET=192.168.203.0/24
-
-# Main mail domain
-DOMAIN=mailu.io
-
-# Hostnames for this server, separated with comas
-HOSTNAMES=localhost
-
-# Postmaster local part (will append the main mail domain)
-POSTMASTER=admin
-
-# Choose how secure connections will behave (value: letsencrypt, cert, notls, mail, mail-letsencrypt)
-TLS_FLAVOR=cert
-
-# Authentication rate limit (per source IP address)
-AUTH_RATELIMIT=10/minute;1000/hour
-
-# Opt-out of statistics, replace with "True" to opt out
-DISABLE_STATISTICS=False
-
-###################################
-# Optional features
-###################################
-
-# Expose the admin interface (value: true, false)
-ADMIN=false
-
-# Choose which webmail to run if any (values: roundcube, snappymail, none)
-WEBMAIL=snappymail
-
-# Dav server implementation (value: radicale, none)
-WEBDAV=none
-
-# Antivirus solution (value: clamav, none)
-#ANTIVIRUS=none
-
-#Antispam solution
-ANTISPAM=none
-
-###################################
-# Mail settings
-###################################
-
-# Message size limit in bytes
-# Default: accept messages up to 50MB
-MESSAGE_SIZE_LIMIT=50000000
-
-# Networks granted relay permissions
-# Use this with care, all hosts in this networks will be able to send mail without authentication!
-RELAYNETS=
-
-# Will relay all outgoing mails if configured
-RELAYHOST=
-
-# Fetchmail delay
-FETCHMAIL_DELAY=600
-
-# Recipient delimiter, character used to delimiter localpart from custom address part
-RECIPIENT_DELIMITER=+
-
-# DMARC rua and ruf email
-DMARC_RUA=admin
-DMARC_RUF=admin
-
-
-# Maildir Compression
-# choose compression-method, default: none (value: gz, bz2, lz4, zstd)
-COMPRESSION=
-# change compression-level, default: 6 (value: 1-9)
-COMPRESSION_LEVEL=
-
-###################################
-# Web settings
-###################################
-
-# Path to the admin interface if enabled
-WEB_ADMIN=/admin
-
-# Path to the webmail if enabled
-WEB_WEBMAIL=/webmail
-
-# Website name
-SITENAME=Mailu
-
-# Linked Website URL
-WEBSITE=https://mailu.io
-
-
-
-###################################
-# Advanced settings
-###################################
-
-# Log driver for front service. Possible values:
-# json-file (default)
-# journald (On systemd platforms, useful for Fail2Ban integration)
-# syslog (Non systemd platforms, Fail2Ban integration. Disables `docker-compose log` for front!)
-# LOG_DRIVER=json-file
-
-# Docker-compose project name, this will prepended to containers names.
-COMPOSE_PROJECT_NAME=mailu
-
-# Header to take the real ip from
-REAL_IP_HEADER=
-
-# IPs for nginx set_real_ip_from (CIDR list separated by commas)
-REAL_IP_FROM=
-
-# choose wether mailu bounces (no) or rejects (yes) mail when recipient is unknown (value: yes, no)
-REJECT_UNLISTED_RECIPIENT=
diff --git a/tests/compose/webmail/01_ensure_admin_unreachable.sh b/tests/compose/webmail/01_ensure_admin_unreachable.sh
new file mode 100755
--- /dev/null
+++ b/tests/compose/webmail/01_ensure_admin_unreachable.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+IP="$(docker inspect webmail_webmail_1|jq -r '.[0].NetworkSettings.Networks.webmail_default.IPAddress')"
+
+MAIN_RETURN_CODE=$(curl -I -so /dev/null -w "%{http_code}" http://$IP/)
+[[ $MAIN_RETURN_CODE -ne 200 && $MAIN_RETURN_CODE -ne 302 ]] && echo "The default page of snappymail hasn't returned 200 but $MAIN_RETURN_CODE!" >>/dev/stderr && exit 1
+[[ $(curl -I -so /dev/null -w "%{http_code}" http://$IP/?admin) -ne 403 ]] && echo "The admin of snappymail is not disabled!" >>/dev/stderr && exit 1
+echo "Everything OK" >/dev/stderr
+
+exit 0
diff --git a/tests/compose/roundcube/docker-compose.yml b/tests/compose/webmail/docker-compose.yml
similarity index 96%
rename from tests/compose/roundcube/docker-compose.yml
rename to tests/compose/webmail/docker-compose.yml
--- a/tests/compose/roundcube/docker-compose.yml
+++ b/tests/compose/webmail/docker-compose.yml
@@ -88,7 +88,7 @@ services:
# Webmail
webmail:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}roundcube:${MAILU_VERSION:-local}
+ image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}webmail:${MAILU_VERSION:-local}
restart: always
env_file: mailu.env
volumes:
diff --git a/tests/compose/roundcube/mailu.env b/tests/compose/webmail/mailu.env
similarity index 99%
rename from tests/compose/roundcube/mailu.env
rename to tests/compose/webmail/mailu.env
--- a/tests/compose/roundcube/mailu.env
+++ b/tests/compose/webmail/mailu.env
@@ -54,7 +54,7 @@ DISABLE_STATISTICS=False
ADMIN=false
# Choose which webmail to run if any (values: roundcube, snappymail, none)
-WEBMAIL=roundcube
+WEBMAIL=snappymail
# Dav server implementation (value: radicale, none)
WEBDAV=none
| Rainloop: Missing support for '*_ADDRESS' env vars
## Environment & Versions
### Environment
`docker-compose`
### Versions
`1.9`
## Description
Rainloop's `start.py` calls `system.resolve_address()` instead of `system.get_host_address_from_environment()`, thus name resolution breaks when using `*_ADDRESS` env vars.
As a workaround one can set `HOST_FRONT` and `HOST_IMAP` manually on the `rainloop` container to get the same behaviour, but it would be great to use the existing `mailu.env` for `rainloop` too.
## Expected behaviour
`rainloop` honors `FRONT_ADDRESS` and `IMAP_ADDRESS`, when set in `mailu.env`.
Rainloop: Missing support for '*_ADDRESS' env vars
## Environment & Versions
### Environment
`docker-compose`
### Versions
`1.9`
## Description
Rainloop's `start.py` calls `system.resolve_address()` instead of `system.get_host_address_from_environment()`, thus name resolution breaks when using `*_ADDRESS` env vars.
As a workaround one can set `HOST_FRONT` and `HOST_IMAP` manually on the `rainloop` container to get the same behaviour, but it would be great to use the existing `mailu.env` for `rainloop` too.
## Expected behaviour
`rainloop` honors `FRONT_ADDRESS` and `IMAP_ADDRESS`, when set in `mailu.env`.
Autotest for Rainloop admin console
In relation to #947, we should create a small script in the test suite which checks if the Rainloop admin interface is indeed disabled by default. Just to be safe for any future changes in config parsing of Rainloop.
A simple implementation would be testing the HTTP return code, however Rainloop still returns 200, OK even when the page is prompting "Access Denied". See RainLoop/rainloop-webmail#1838. If that gets not solved, we might resort to parsing the result page. THat would be more error prone if they ever choose to change the layout of the error page.
|
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
I've opened a rainloop PR for this:
https://github.com/RainLoop/rainloop-webmail/pull/2029 | 2022-11-12T10:37:53 |
Mailu/Mailu | 2,530 | Mailu__Mailu-2530 | [
"2527"
] | d8e2a2960b2b05090cb1067fbced3da5efcfa959 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -73,7 +73,7 @@
'SESSION_KEY_BITS': 128,
'SESSION_TIMEOUT': 3600,
'PERMANENT_SESSION_LIFETIME': 30*24*3600,
- 'SESSION_COOKIE_SECURE': True,
+ 'SESSION_COOKIE_SECURE': None,
'CREDENTIAL_ROUNDS': 12,
'TLS_PERMISSIVE': True,
'TZ': 'Etc/UTC',
@@ -156,6 +156,8 @@ def init_app(self, app):
self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
self.config['SESSION_COOKIE_HTTPONLY'] = True
+ if self.config['SESSION_COOKIE_SECURE'] is None:
+ self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'
self.config['SESSION_PERMANENT'] = True
self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
| Can't Login as admin on initial creation
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `1.9`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
## Replication Steps
<!--
Steps for replicating your issue
-->
Deploy this docker-compose.yml:
```yaml
# This file is auto-generated by the Mailu configuration wizard.
# Please read the documentation before attempting any change.
# Generated for compose flavor
version: '2.2'
services:
# External dependencies
redis:
image: redis:alpine
restart: always
volumes:
- "/mailu/redis:/data"
dns: "192.168.222.1" # pfsense running unbound with DNSSEC enabled
# Core services
front:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
logging:
driver: json-file
ports:
- "192.168.222.83:80:80"
- "192.168.222.83:443:443"
- "192.168.222.83:25:25"
- "192.168.222.83:465:465"
- "192.168.222.83:587:587"
- "192.168.222.83:110:110"
- "192.168.222.83:995:995"
- "192.168.222.83:143:143"
- "192.168.222.83:993:993"
volumes:
- "/mailu/certs:/certs"
- "/mailu/overrides/nginx:/overrides:ro"
dns: "192.168.222.1"
admin:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/data:/data"
- "/mailu/dkim:/dkim"
depends_on:
- redis
dns: "192.168.222.1"
imap:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/mail:/mail"
- "/mailu/overrides/dovecot:/overrides:ro"
depends_on:
- front
dns: "192.168.222.1"
smtp:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}postfix:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/mailqueue:/queue"
- "/mailu/overrides/postfix:/overrides:ro"
depends_on:
- front
dns: "192.168.222.1"
antispam:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-1.9}
hostname: antispam
restart: always
env_file: mailu.env
volumes:
- "/mailu/filter:/var/lib/rspamd"
- "/mailu/overrides/rspamd:/etc/rspamd/override.d:ro"
depends_on:
- front
dns: "192.168.222.1"
# Optional services
antivirus:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}clamav:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/filter:/data"
dns: "192.168.222.1"
webdav:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}radicale:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/dav:/data"
dns: "192.168.222.1"
fetchmail:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}fetchmail:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/data/fetchmail:/data"
dns: "192.168.222.1"
# Webmail
webmail:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}rainloop:${MAILU_VERSION:-1.9}
restart: always
env_file: mailu.env
volumes:
- "/mailu/webmail:/data"
- "/mailu/overrides/rainloop:/overrides:ro"
depends_on:
- imap
dns: "192.168.222.1"
networks:
default:
driver: bridge
ipam:
driver: default
config:
- subnet: 192.168.203.0/24
```
with this mailu.env:
```ini
# Mailu main configuration file
#
# This file is autogenerated by the configuration management wizard for compose flavor.
# For a detailed list of configuration variables, see the documentation at
# https://mailu.io
###################################
# Common configuration variables
###################################
# Set to a randomly generated 16 bytes string
SECRET_KEY=SOQJVE139OEKUPY3
# Subnet of the docker network. This should not conflict with any networks to which your system is connected. (Internal and external!)
SUBNET=192.168.203.0/24
# Main mail domain
DOMAIN=mail.lab.arpa
# Hostnames for this server, separated with comas
HOSTNAMES=mail.lab.arpa,docker.lab.arpa
# Postmaster local part (will append the main mail domain)
POSTMASTER=admin
# Choose how secure connections will behave (value: letsencrypt, cert, notls, mail, mail-letsencrypt)
TLS_FLAVOR=notls
# Authentication rate limit per IP (per /24 on ipv4 and /56 on ipv6)
AUTH_RATELIMIT_IP=60/hour
# Authentication rate limit per user (regardless of the source-IP)
AUTH_RATELIMIT_USER=100/day
# Opt-out of statistics, replace with "True" to opt out
DISABLE_STATISTICS=False
###################################
# Optional features
###################################
# Expose the admin interface (value: true, false)
ADMIN=true
INITIAL_ADMIN_ACCOUNT=admin
INITIAL_ADMIN_DOMAIN=mail.lab.arpa
INITIAL_ADMIN_PW=5ecret!
INITIAL_ADMIN_MODE=update
# Choose which webmail to run if any (values: roundcube, rainloop, none)
WEBMAIL=rainloop
# Dav server implementation (value: radicale, none)
WEBDAV=radicale
# Antivirus solution (value: clamav, none)
ANTIVIRUS=clamav
###################################
# Mail settings
###################################
# Message size limit in bytes
# Default: accept messages up to 50MB
# Max attachment size will be 33% smaller
MESSAGE_SIZE_LIMIT=50000000
# Message rate limit (per user)
MESSAGE_RATELIMIT=200/day
# Networks granted relay permissions
# Use this with care, all hosts in this networks will be able to send mail without authentication!
RELAYNETS=
# Will relay all outgoing mails if configured
RELAYHOST=
# Fetchmail delay
FETCHMAIL_DELAY=600
# Recipient delimiter, character used to delimiter localpart from custom address part
RECIPIENT_DELIMITER=+
# DMARC rua and ruf email
DMARC_RUA=admin
DMARC_RUF=admin
# Welcome email, enable and set a topic and body if you wish to send welcome
# emails to all users.
WELCOME=false
WELCOME_SUBJECT=Welcome to your new email account
WELCOME_BODY=Welcome to your new email account, if you can read this, then it is configured properly!
# Maildir Compression
# choose compression-method, default: none (value: gz, bz2, lz4, zstd)
COMPRESSION=
# change compression-level, default: 6 (value: 1-9)
COMPRESSION_LEVEL=
# IMAP full-text search is enabled by default. Set the following variable to off in order to disable the feature.
# FULL_TEXT_SEARCH=off
###################################
# Web settings
###################################
# Path to redirect / to
WEBROOT_REDIRECT=/webmail
# Path to the admin interface if enabled
WEB_ADMIN=/admin
# Path to the webmail if enabled
WEB_WEBMAIL=/webmail
# Website name
SITENAME=mailu.lab.arpa
# Linked Website URL
WEBSITE=https://mailu.lab.arpa
###################################
# Advanced settings
###################################
# Log driver for front service. Possible values:
# json-file (default)
# journald (On systemd platforms, useful for Fail2Ban integration)
# syslog (Non systemd platforms, Fail2Ban integration. Disables `docker-compose log` for front!)
# LOG_DRIVER=json-file
# Docker-compose project name, this will prepended to containers names.
COMPOSE_PROJECT_NAME=mailu
# Number of rounds used by the password hashing scheme
CREDENTIAL_ROUNDS=12
# Header to take the real ip from
REAL_IP_HEADER=
# IPs for nginx set_real_ip_from (CIDR list separated by commas)
REAL_IP_FROM=
# choose wether mailu bounces (no) or rejects (yes) mail when recipient is unknown (value: yes, no)
REJECT_UNLISTED_RECIPIENT=
# Log level threshold in start.py (value: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)
LOG_LEVEL=WARNING
# Timezone for the Mailu containers. See this link for all possible values https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Etc/UTC
###################################
# Database settings
###################################
DB_FLAVOR=sqlite
```
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Can't login as `[email protected]` with password `5ecret!`. Get red banner if password incorrect. Just shown the login page again if correct
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
Login Success
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker-compose `docker-compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
β mailu docker-compose logs --tail 1000 admin
```bash
Attaching to mailu_admin_1
admin_1 | INFO [alembic.runtime.migration] Context impl SQLiteImpl.
admin_1 | INFO [alembic.runtime.migration] Will assume non-transactional DDL.
admin_1 | updated admin password
admin_1 | [2022-11-12 16:38:28 +0000] [17] [INFO] Starting gunicorn 20.1.0
admin_1 | [2022-11-12 16:38:28 +0000] [17] [INFO] Listening at: http://0.0.0.0:80 (17)
admin_1 | [2022-11-12 16:38:28 +0000] [17] [INFO] Using worker: sync
admin_1 | [2022-11-12 16:38:28 +0000] [25] [INFO] Booting worker with pid: 25
```
```
imap_1 | Nov 12 16:38:49 pop3-login: Info: Disconnected: Aborted login by logging out (no auth attempts in 0 secs): user=<>, rip=127.0.0.1, lip=127.0.0.1, secured, session=<r1ILpEjt5Z5/AAAB>
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "POST /sso/login HTTP/1.1" 302 218 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /admin HTTP/1.1" 308 260 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /admin/ HTTP/1.1" 302 226 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /sso/login HTTP/1.1" 200 2071 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /static/vendor.css HTTP/1.1" 304 0 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /static/app.css HTTP/1.1" 304 0 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /static/vendor.js HTTP/1.1" 304 0 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /static/app.js HTTP/1.1" 304 0 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:07 +0000] "GET /static/mailu.png HTTP/1.1" 304 0 "http://mail.lab.arpa/sso/login" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
front_1 | 192.168.222.100 - - [12/Nov/2022:16:39:08 +0000] "GET /static/fa-solid-900.woff2 HTTP/1.1" 304 0 "http://mail.lab.arpa/static/vendor.css" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36"
```
| Try a password without ! or try enclosing it in quotes
Initial password was much simpler but I used an example generated from chrome fake filler extension for the purposes of this issue (I also did try it with the password above before posting - however it wasn't working before that, with a password that basically amounts to `abdc1234`)
Try with ``SESSION_COOKIE_SECURE=False`` ... you're using notls and that's why it's required.
> Try with ``SESSION_COOKIE_SECURE=False`` ... you're using notls and that's why it's required.
Thanks. I'll give it a try. Local test environment so I figured I'd use notls just to try and keep things simple.
After all, I was setting this up to test an integration with the config cli, not transport later security.
Please consider https://github.com/Mailu/Mailu/pull/2464 if you're trying to integrate it somewhere.
Did setting SESSION_COOKIE_SECURE fix the "can't login" problem? | 2022-11-14T14:12:54 |
|
Mailu/Mailu | 2,542 | Mailu__Mailu-2542 | [
"2183"
] | 68bb8da2b7dbc8112366684168413546f9881f25 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -80,6 +80,9 @@
'TLS_PERMISSIVE': True,
'TZ': 'Etc/UTC',
'DEFAULT_SPAM_THRESHOLD': 80,
+ 'PROXY_AUTH_WHITELIST': '',
+ 'PROXY_AUTH_HEADER': 'X-Auth-Email',
+ 'PROXY_AUTH_CREATE': False,
# Host settings
'HOST_IMAP': 'imap',
'HOST_LMTP': 'imap:2525',
@@ -171,6 +174,7 @@ def init_app(self, app):
self.config['HOSTNAMES'] = ','.join(hostnames)
self.config['HOSTNAME'] = hostnames[0]
self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
+ self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
# update the app config
app.config.update(self.config)
diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -6,6 +6,8 @@
from flask import current_app as app
import flask
import flask_login
+import secrets
+import ipaddress
@sso.route('/login', methods=['GET', 'POST'])
def login():
@@ -57,3 +59,41 @@ def logout():
flask.session.destroy()
return flask.redirect(flask.url_for('.login'))
+
[email protected]('/proxy', methods=['GET'])
[email protected]('/proxy/<target>', methods=['GET'])
+def proxy(target='webmail'):
+ ip = ipaddress.ip_address(flask.request.remote_addr)
+ if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
+
+ email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
+ if not email:
+ return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
+
+ user = models.User.get(email)
+ if user:
+ flask.session.regenerate()
+ flask_login.login_user(user)
+ return flask.redirect(app.config['WEB_ADMIN'] if target=='admin' else app.config['WEB_WEBMAIL'])
+
+ if not app.config['PROXY_AUTH_CREATE']:
+ return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
+
+ client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
+ try:
+ localpart, desireddomain = email.rsplit('@')
+ except Exception as e:
+ flask.current_app.logger.error('Error creating a new user via proxy for %s from %s: %s' % (email, client_ip, str(e)), e)
+ return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
+ domain = models.Domain.query.get(desireddomain) or flask.abort(500, 'You don\'t exist. Go away! (domain=%s)' % desireddomain)
+ if not domain.max_users == -1 and len(domain.users) >= domain.max_users:
+ flask.current_app.logger.warning('Too many users for domain %s' % domain)
+ return flask.abort(500, 'Too many users in (domain=%s)' % domain)
+ user = models.User(localpart=localpart, domain=domain)
+ user.set_password(secrets.token_urlsafe())
+ models.db.session.add(user)
+ models.db.session.commit()
+ user.send_welcome()
+ flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')
+ return flask.redirect(app.config['WEB_ADMIN'] if target=='admin' else app.config['WEB_WEBMAIL'])
| resolve #1972 authentication using external proxy
## What type of PR?
enhancement
## What does this PR do?
This PR enables header authentication on frontends. A proxy sends the username in a header (X-Auth-Email) to the backend server, which logs in the user without requesting a password or login action.
Therefore 2 Headers need to be set by proxy:
X-Auth-Proxy-Secret
X-Auth-Email
I m using openresty so the lines would look like:
```
ngx.req.set_header("X-Auth-Proxy-Secret", "aa0111f948a1cb2a947137e56922cb68")
ngx.req.set_header("X-Auth-Email", res.id_token.email)
ngx.req.set_header("X-Auth-Proxy-Target", "Admin") # optional
```
The third header is optional, its possible to choose the target after successful proxy login. Start at admin interface or webmail.
You need to add to values in mailu.env
```
PROXY_SECRET=aa0111f948a1cb2a947137e56922cb68
PROXY_CREATE=True
```
The secret is the same as used by proxy. It substitutes user credentials and has to be kept secret.
With the second value you can decide what happens if the user is not found. User can be created automatically if it does not exist (The username must specify a user from a domain used by mailu instance)
Now there is a new route `/sso/proxy`. If called and the proxy credentials are correct the user gets logged in. Its possible to set `WEBROOT_REDIRECT=/sso/proxy` to enable auto login.
Thats it.
### Related issue(s)
- Auto close an issue like: closes #1972
## Prerequisites
Before we can consider review and merge, please make sure the following list is done and checked.
If an entry in not applicable, you can check it or remove it from the list.
- [ ] In case of feature or enhancement: documentation updated accordingly
- [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/workflow.html#changelog) entry file.
| Thanks for submitting this pull request.
Bors-ng will now build test images. When it succeeds, we will continue to review and test your PR.
bors try
Note: if this build fails, [read this](http://mailu.io/master/contributors/environment.html#when-bors-try-fails).
## try
Build succeeded:
* [CI-Done](https://github.com/Mailu/Mailu/runs/4908279827?check_suite_focus=true)
Thank you for your contribution.
I am afraid that this isn't mergeable as-is; There are a few things that would deserve to be re-considered:
- GET-only authentication: is this something we want? The usual way of doing this is to form-fill (force a POST maybe with a dynamically generated temporary token) and redirect in all cases. Even if we do decide that we are OK with GET-only, I strongly suggest that we do not duplicate the code and redirect to the login form if login through headers failed.
- rate-limiting: does it make sense in this context? What about device cookies?
- configuration: should ``REAL_IP_FROM`` be used to apply additional restrictions on where the route is allowed from? Should the header names be configurable?
- security: PROXY_SECRET should enforce at least a specific length (to prevent users from shooting themselves in the foot)
> GET-only authentication: is this something we want?
Dunno, it doesnt matter in this context, cause the method is not used to submit any data. (no vars used from post or get, so no security risk at all)
All data is extracted from header. One method has to be choosen and get is the default method, even when none is explicit specified.
>Should the header names be configurable?
Not necessary in my opinion. Makes it more complicated at no gain.
The proxy auth is okay for some kinds of setups.
Personally i would prefer OpenID, seems not too hard to implement with mailu stack...
> > GET-only authentication: is this something we want?
>
> Dunno, it doesnt matter in this context, cause the method is not used to submit any data. (no vars used from post or get, so no security risk at all) All data is extracted from header. One method has to be choosen and get is the default method, even when none is explicit specified.
>
It's about ensuring that a crawler going through the proxy doesn't accidentally get logged in... and that "browser tabs" remaining open in browsers don't create a myriad of sessions by accident.
> > > GET-only authentication: is this something we want?
> >
> >
> > Dunno, it doesnt matter in this context, cause the method is not used to submit any data. (no vars used from post or get, so no security risk at all) All data is extracted from header. One method has to be choosen and get is the default method, even when none is explicit specified.
>
> It's about ensuring that a crawler going through the proxy doesn't accidentally get logged in... and that "browser tabs" remaining open in browsers don't create a myriad of sessions by accident.
Shame the creator of this PR didn't respond.
Lets explain and get this back on track:
Header auth should be toggleable with a **very** clear warning and (preferably) a sourceip whitelist.
It's often used with reverse proxies and always requires the header to be block/filtered on the proxy-level.
However:
This should follow standardised best-practices: Just the email/name and or group of the user. Not all sorts of semi-custom headers added on top for a minimal level of added security. because that level of added 'security' means users would need to make all sorts of customisations to their reverse proxy setups that are not common, which will only increase the failure rate and leads to mistakes being made.
> > > > GET-only authentication: is this something we want?
> > >
> > >
> > > Dunno, it doesnt matter in this context, cause the method is not used to submit any data. (no vars used from post or get, so no security risk at all) All data is extracted from header. One method has to be choosen and get is the default method, even when none is explicit specified.
> >
> >
> > It's about ensuring that a crawler going through the proxy doesn't accidentally get logged in... and that "browser tabs" remaining open in browsers don't create a myriad of sessions by accident.
>
> Shame the creator of this PR didn't respond. Lets explain and get this back on track: Header auth should be toggleable with a **very** clear warning and (preferably) a sourceip whitelist.
>
> It's often used with reverse proxies and always requires the header to be block/filtered on the proxy-level.
>
> However: This should follow standardised best-practices: Just the email/name and or group of the user. Not all sorts of semi-custom headers added on top for a minimal level of added security. because that level of added 'security' means users would need to make all sorts of customisations to their reverse proxy setups that are not common, which will only increase the failure rate and leads to mistakes being made.
So a source-ip whitelist is preferable to a shared key as proposed here?
We can definitely drop X-Auth-Proxy-Target and pass it as a parameter instead.
| 2022-11-19T17:01:46 |
|
Mailu/Mailu | 2,543 | Mailu__Mailu-2543 | [
"2231",
"2231"
] | 68bb8da2b7dbc8112366684168413546f9881f25 | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -2,7 +2,6 @@
"""
import os
-import smtplib
import json
from datetime import date
@@ -420,14 +419,19 @@ def __declare_last__(cls):
def sendmail(self, subject, body):
""" send an email to the address """
- f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
- with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
- to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
- msg = text.MIMEText(body)
- msg['Subject'] = subject
- msg['From'] = f_addr
- msg['To'] = to_address
- smtp.sendmail(f_addr, [to_address], msg.as_string())
+ try:
+ f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
+ ip, port = app.config['HOST_LMTP'].rsplit(':')
+ with smtplib.LMTP(ip, port=port) as lmtp:
+ to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
+ msg = text.MIMEText(body)
+ msg['Subject'] = subject
+ msg['From'] = f_addr
+ msg['To'] = to_address
+ lmtp.sendmail(f_addr, [to_address], msg.as_string())
+ return True
+ except smtplib.SMTPException:
+ return False
@classmethod
def resolve_domain(cls, email):
diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -21,8 +21,9 @@ def announcement():
form = forms.AnnouncementForm()
if form.validate_on_submit():
for user in models.User.query.all():
- user.sendmail(form.announcement_subject.data,
- form.announcement_body.data)
+ if not user.sendmail(form.announcement_subject.data,
+ form.announcement_body.data):
+ flask.flash('Failed to send to %s' % user.email, 'error')
# Force-empty the form
form.announcement_subject.data = ''
form.announcement_body.data = ''
| Unable to send an Public Announcement
## Environment & Versions
### Environment
- docker-compose
### Versions
- version 1.9
## Description
My mailu mail server is running as I expected it. Everything is working normally besides one admin function of it.
I am not able to use the Public Announcement. Doing so results in an error 502
I am using the mailu.env and docker-compose I created online for this.
## Replication Steps
I am logged in as administrator (my account has the role of global administrator as well as manager of the domain)
I choose option announcement under Menu Administration, fill in the Announcement- subject and body and select click on send button. After 30 secs I get an "502 Bad Gateway" message from nginx and no mail is being send
I have NO overrides options defined.
## Expected behaviour
I expected a mail to be send to all users of my domain with the context of the subject and body I entered
Instead I get an nginx error 502 page.
## Logs
I have exchanged my client IP adress with aaa.bbb.ccc.ddd and adjusted the domain to example.com in the logs underneath for privacy reasons. Rest is exactly as it is stated in the logs.
mailu-admin-1:
[2022-02-13 09:53:03 +0100] [9] [CRITICAL] WORKER TIMEOUT (pid:10)
[2022-02-13 09:53:04 +0100] [9] [WARNING] Worker with pid 10 was terminated due to signal 9
[2022-02-13 09:53:04 +0100] [29] [INFO] Booting worker with pid: 29
mailu-front-1:
2022/02/13 09:53:04 [error] 8#8: *3 upstream prematurely closed connection while reading response header from upstream, client: aaa.bbb.ccc.ddd, server: , request: "POST /admin/announcement HTTP/2.0", upstream: "http://192.168.203.6:80/admin/announcement", host: "mail.example.com", referrer: "https://mail.example.com/admin/announcement"
mailu-smtp-1:
2022-02-13T09:52:33.536923+01:00 b27dd03ea1f5 postfix/smtpd[371]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-13T09:52:33.540236+01:00 b27dd03ea1f5 postfix/smtpd[371]: discarding EHLO keywords: PIPELINING
2022-02-13T09:53:04.866990+01:00 b27dd03ea1f5 postfix/smtpd[371]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3
Unable to send an Public Announcement
## Environment & Versions
### Environment
- docker-compose
### Versions
- version 1.9
## Description
My mailu mail server is running as I expected it. Everything is working normally besides one admin function of it.
I am not able to use the Public Announcement. Doing so results in an error 502
I am using the mailu.env and docker-compose I created online for this.
## Replication Steps
I am logged in as administrator (my account has the role of global administrator as well as manager of the domain)
I choose option announcement under Menu Administration, fill in the Announcement- subject and body and select click on send button. After 30 secs I get an "502 Bad Gateway" message from nginx and no mail is being send
I have NO overrides options defined.
## Expected behaviour
I expected a mail to be send to all users of my domain with the context of the subject and body I entered
Instead I get an nginx error 502 page.
## Logs
I have exchanged my client IP adress with aaa.bbb.ccc.ddd and adjusted the domain to example.com in the logs underneath for privacy reasons. Rest is exactly as it is stated in the logs.
mailu-admin-1:
[2022-02-13 09:53:03 +0100] [9] [CRITICAL] WORKER TIMEOUT (pid:10)
[2022-02-13 09:53:04 +0100] [9] [WARNING] Worker with pid 10 was terminated due to signal 9
[2022-02-13 09:53:04 +0100] [29] [INFO] Booting worker with pid: 29
mailu-front-1:
2022/02/13 09:53:04 [error] 8#8: *3 upstream prematurely closed connection while reading response header from upstream, client: aaa.bbb.ccc.ddd, server: , request: "POST /admin/announcement HTTP/2.0", upstream: "http://192.168.203.6:80/admin/announcement", host: "mail.example.com", referrer: "https://mail.example.com/admin/announcement"
mailu-smtp-1:
2022-02-13T09:52:33.536923+01:00 b27dd03ea1f5 postfix/smtpd[371]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-13T09:52:33.540236+01:00 b27dd03ea1f5 postfix/smtpd[371]: discarding EHLO keywords: PIPELINING
2022-02-13T09:53:04.866990+01:00 b27dd03ea1f5 postfix/smtpd[371]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3
| This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
> This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
Thanks for your feedback. I have tried your advise but did not succeed getting any announcement out to "my users".
I have even tried a newly installed VPS server without any extra firewall rules or other adjustments, but this seems the only part that is not working of mailu for me.
Could it be generic and no one else is using this ? Because I do not read others having this issue (only some old resolved similar issues)
I do hope it can be fixed, because I would like to start using mailu in production
I have pinpointed the start of the problem
It is this line 24 in /app/mailu/ui/views/base.py in mailu-front-1 when it goes wrong
` user.sendmail(form.announcement_subject.data, form.announcement_body.data)`
If I comment it I do not get the 502 message and the expected flask success message, but of course also not the email ;-)
There seems to be something wrong with the function I think although I am no python/flask expert
I hope someone is willing to check/verify and maybe come with a solution.
I would also appreciate hearing whether others are able to use this option successfully, or is it just me?
On a fresh 1.9 installation I cannot replicate this. Without reliable replicating steps it will be difficult to look into this.
Could you try again with a small test message? Possibly the issue is tied to the contents of the message.
This is how the SMTP logs look for a successful attempt. For every user on my test server an email is sent
```
smtp_1 | 2022-02-17T15:29:14.439769+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.442145+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.535834+00:00 967bafac904d postfix/smtpd[1613]: 82CA418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.560068+00:00 967bafac904d postfix/cleanup[1616]: 82CA418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:14.589978+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.590750+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.592501+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.595564+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.709425+00:00 967bafac904d postfix/smtpd[1613]: AD2BC18005F: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.748396+00:00 967bafac904d postfix/cleanup[1616]: AD2BC18005F: message-id=<>
smtp_1 | 2022-02-17T15:29:14.762164+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: from=<[email protected]>, size=313, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.764959+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.772441+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.775390+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.789028+00:00 967bafac904d postfix/lmtp[1617]: 82CA418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.31, delays=0.11/0.03/0.05/0.12, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> eLjxJ8ppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.803519+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: removed
smtp_1 | 2022-02-17T15:29:14.947601+00:00 967bafac904d postfix/smtpd[1613]: E705418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.965108+00:00 967bafac904d postfix/lmtp[1617]: AD2BC18005F: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.37, delays=0.17/0.04/0.01/0.15, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> YAx8MMppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.965600+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: removed
smtp_1 | 2022-02-17T15:29:14.988799+00:00 967bafac904d postfix/cleanup[1616]: E705418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:15.010396+00:00 967bafac904d postfix/qmgr[372]: E705418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:15.010783+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:15.159711+00:00 967bafac904d postfix/lmtp[1618]: E705418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.38, delays=0.23/0.05/0.01/0.08, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> ZXOoBMtpDmJxBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:15.160152+00:00 967bafac904d postfix/qmgr[372]: E705418005E: removed
```
This is the relevant code that is used for sending the emails to every user:
https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/models.py#L413
```
def sendmail(self, subject, body):
""" send an email to the address """
f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
msg = text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = f_addr
msg['To'] = to_address
smtp.sendmail(f_addr, [to_address], msg.as_string())
```
The form code https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/ui/views/base.py#L18
```
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
form = forms.AnnouncementForm()
if form.validate_on_submit():
for user in models.User.query.all():
user.sendmail(form.announcement_subject.data,
form.announcement_body.data)
# Force-empty the form
form.announcement_subject.data = ''
form.announcement_body.data = ''
flask.flash('Your announcement was sent', 'success')
return flask.render_template('announcement.html', form=form)
```
In your log we can see that a connection is made to smtp. But for some reason the email is not sent. There appears to be some kind of timeout after 30 seconds. Sending the email should take less than a second.
> But for some reason the email is not send. There appears to be some kind of timeout after 30 seconds.
100% exact situation as with my post. It all looked just right, but some random timeout appeared. But I'm telling you - I knew that it worked, as I used the feature before, so I kept retrying every once in a while. And it worked. That might help debug the issue. I also researched bugs in the upstream projects and found some vaguely related stuff, but from many years ago.
> Sending the email should take less than a second.
Yeah, when it works it works in a second and makes you wonder what the fuss is about :)
As requested I tried again, but same result. My subject and body consisted of only 3 characters, so really small
`2022-02-20T10:59:17.712468+01:00 b3d7b3d95e0e postfix/smtpd[13439]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-20T10:59:17.715164+01:00 b3d7b3d95e0e postfix/smtpd[13439]: discarding EHLO keywords: PIPELINING
INFO:root:Connect
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'domain example.com')
DEBUG:root:Request domain/example.com
DEBUG:root:Table get example.com
DEBUG:root:Table get example.com is example.com
DEBUG:root:Replying b'OK example.com'
DEBUG:root:Received bytearray(b'transport example.com')
DEBUG:root:Request transport/example.com
DEBUG:root:Table get example.com
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport [email protected]')
DEBUG:root:Request transport/[email protected]
DEBUG:root:Table get [email protected]
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
2022-02-20T10:59:49.216014+01:00 b3d7b3d95e0e postfix/smtpd[13439]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3`
The timeout (and so the wait after pressing send) occurs after the 5 line of this log (so after DEBUG:root:Table get *)
When that timeout has passed the 502 error is displayed (and the other lines logged)
I'm just a user myself FYI. And by trying again, I meant hammering it. I had my text on copy paste and tried at least two dozen times at varying hours. I have no clue how to debug this, I just said that in case someone needs to get something out urgently. I also checked the GH issues first when it happened to me and was lost, because the mailu team is overwhelmed already and no quick fix was in sight. I tried taking the containers down and up, taking other applications down temporarily to see if it was somehow exploding memory, but that made no difference. At random times it works fine.
At least mail itself worked continuously. Before the v1.8 I had a nasty bug that made me almost quit mailu (a minor update completely broke everything, and there was no way to go back one release) and it being understaffed meant all panic and no solution.
So if anyone finds this via search: hammer it. Or get all the mails via SQL from the `./data/main.db` and send normally via Thunderbird instead. Although I don't know how many mails at once TB/the stack can effectively handle, TB is a mess to begin with and wasn't meant for mass-mailing. If you do this a lot and have hundreds of users you might just want to make a little py script for now that sends them one after another.
Does the postmaster account exist?
The local part comes from POSTMASTER in mailu.env.
The domain part is from DOMAIN in mailu.env.
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Yes it does. it is set to admin in mailu.env and that is also the account I log in with for sending this. I have even tried defining the alias of postmaster for this account, but also that does not resolve it unfortunately.
I did find out though that sending a welcome mail (setting it to true in mailu.env) also fails. And yet sending and receiving mail as a normal user works....
Like this:
SUBNET=192.168.203.0/24
DOMAIN=example.com
HOSTNAMES=mail.example.com
POSTMASTER=admin
TLS_FLAVOR=letsencrypt
I do wonder if it might have to do with the /etc/postfix/master.cf file
Looking at the code it seems the smtp on port 10025 is being used.
Can someone determine if that is perhaps blocking it ? Regular smtp works fine
> bash-5.1# cat /etc/postfix/master.cf
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
# Exposed SMTP service
smtp inet n - n - - smtpd
# Internal SMTP service
10025 inet n - n - - smtpd
-o smtpd_sasl_auth_enable=yes
-o smtpd_discard_ehlo_keywords=pipelining
-o smtpd_client_restrictions=$check_ratelimit,reject_unlisted_sender,reject_authenticated_sender_login_mismatch,permit
-o smtpd_reject_unlisted_recipient=no
-o cleanup_service_name=outclean
outclean unix n - n - 0 cleanup
-o header_checks=pcre:/etc/postfix/outclean_header_filter.cf
-o nested_header_checks=
Some more detailed log from smtpd on port 10025 from the moment I try to send a public announcement you can find in the file here:
[detailed log.txt](https://github.com/Mailu/Mailu/files/8103785/detailed.log.txt)
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Hi Dimitri,
Could you please have another look at this ? I provided a lot more detailed information ...
In a similar fashion, today I've ran into the much dreaded 'auth failed' bug from more than a year ago again.
It magically works again after a while, just like the announcements do for us, but this cannot be a permanent state.
As usual the logs show nothing useful:
```
front_1 | 2022/03/05 15:29:33 [error] 13#13: *590 recv() failed (111: Connection refused) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
front_1 | 2022/03/05 15:29:33 [crit] 13#13: *590 SSL_shutdown() failed (SSL: error:14094123:SSL routines:ssl3_read_bytes:application data after close notify) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
```
I'm posting in this thread because whatever causes this random behaviour must be linked. Last time the update to 1.8 fixed it. I hate seeing this yet again..
We need reliability, and as much as I like the simplicity of mailu compared to the rest, not being able to debug these issues in a timely fashion eventually will force my hand to move to something else. Please prioritize at least critical issues like 'random' behaviour.
For the record, this is v1.8. I will try updating and hope it fixes it again like last time..
Edit: It did just like last time. 'Last time' turns out was only half a year ago.
I confirm announcements don't work for me, too.
My setup is Mailu 1.9 / docker-compose / letsencrypt flavor / no overrides for now, and the issue is exactly the one described by @metlinux.
I don't know if it is related, I also get an error 502 when I use the admin web interface to create a new user, although in this case the user is effectively created, so it's not really a problem.
EDIT: it IS related, since the "welcome" message (set to true in mailu.env) never reaches the new user's box.
Issues not for bugs, enhancement requests or discussion go stale after 21 days of inactivity. This issue will be automatically closed after 14 days.
For all metrics refer to the [stale.yml file](https://github.com/Mailu/Mailu/blob/master/.github/stale.yml).
Github issues are not meant for user support. For **user-support questions**, reach out on the [matrix support channel](https://matrix.to/#/#mailu:tedomum.net).
Mark the issue as fresh by simply adding a comment to the issue.
If this issue is safe to close, please do so now.
The message is rejected by rspamd with `forced: soft reject "Try again later"; score=nan (set by greylist)`
Maybe the private IP of the admin conatiner needs to be skipped in rspamd:
```
(default: F (soft reject): [11.40/15.00] [HFILTER_HELO_BADIP(4.50){192.168.203.6;1;},
VIOLATED_DIRECT_SPF(3.50){},MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},
ARC_NA(0.00){},ARC_SIGNED(0.00){xx.yy:s=dkim:i=1;},DMARC_NA(0.00){aa.bb;},FROM_EQ_ENVFROM(0.00){},
FROM_NO_DN(0.00){},GREYLIST(0.00){greylisted;Mon, 04 Apr 2022 09:24:05 GMT;new record;},
MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},R_DKIM_NA(0.00){},
R_SPF_SOFTFAIL(0.00){~all:c;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}])
```
The second part of the problem is not handling smtplib exceptions (smtplib.SMTPDataError) in models.py (line 425):
```
smtp.sendmail(f_addr, [to_address], msg.as_string())
=> smtplib.SMTPDataError: (451, b'4.7.1 Try again later')
```
What is odd, is that the SUBNET (192.168.203.0/24) setting (from mailu.env) should have been configured as ``local_networks = [192.168.203.0/24];`` in options.inc of rspamd.
This results in it being added as local_addrs in the rspamd config.
[This setting](https://rspamd.com/doc/configuration/options.html) configures; ``map or list of IP networks used as local, so certain checks are skipped for them (e.g. SPF checks)``.
Yet in the log excerpt we see that SPF check does take place. Rspamd ignores the setting? Or perhaps the subnet is incorrectly configured? SUBNET in mailu.env should match the subnet configured in docker-compose.yml.
If SUBNET in mailu.env matches the subnet in the docker-compose.yml file, then it should not be rejected. No SPF test takes place.
The settings of rspamd can be checked by first going into a shell and then running rspamadm configdump:
```
docker-compose exec antispam bash
rspamadm configdump | less
/options
```
In the options you should see the local_addr section with the whitelisted ips
```
options {
check_all_filters = true;
...
local_addrs [
"192.168.0.0/16",
"10.0.0.0/8",
"172.16.0.0/12",
"fd00::/8",
"169.254.0.0/16",
"fe80::/10",
"127.2.4.7",
]
```
"192.168.0.0/16", is my SUBNET setting.
As a test,
1. I added google.com as domain in the Admin WebUI.
2. Added an administrator user for the domain google.com.
3. Changed DOMAIN in mailu.env to google.com. (DOMAIN is used as sender for sending public announcements).
4. Send a public announcement. If SPF is checked, then rspamd will not allow this.
5. Result: email is sent.
```
rspamd_task_write_log: id: <undef>, qid: <9CFC9C0414>, ip: 192.168.203.7, from: <[email protected]>, (default: F (no action): [3.40/15.00] [MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},ARC_NA(0.00){},FROM_EQ_ENVFROM(0.00){},FROM_NO_DN(0.00){},MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}]), len: 253, time: 63.330ms, dns req: 1, digest: <22bfa85cfd7bdb692033454ae2b44ddb>, rcpts: <[email protected]>, mime_rcpts: <[email protected]>
proxy; rspamd_protocol_http_reply: regexp statistics: 41 pcre regexps scanned, 0 regexps matched, 172 regexps total, 11 regexps cached, 1.03KiB scanned using pcre, 1.03KiB scanned total
proxy; proxy_milter_finish_handler: finished milter connection
proxy; proxy_accept_socket: accepted milter connection from 192.168.203.9 port 54560
milter; rspamd_milter_process_command: got connection from 192.168.203.7:33374
proxy; rspamd_message_parse: loaded message; id: <undef>; queue-id: <D17B5C0451>; size: 253; checksum: <22bfa85cfd7bdb692033454ae2b44ddb>
proxy; rspamd_mime_part_detect_language: detected part language: vi
lua; greylist.lua:217: skip greylisting for local networks and/or authorized users
proxy; dkim_symbol_callback: skip DKIM checks for local networks and authorized users
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_HAM of classifier bayes: not enough learns 0; 200 required
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_SPAM of classifier bayes: not enough learns 33; 200 required
proxy; rspamd_stat_classifiers_process: skip statistics as SPAM class is missing
lua; greylist.lua:331: Score too low - skip greylisting
```
Note the:
```
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
```
IMHO we should do LMTP delivery to the inboxes here, not fuss around with the filters.
This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
> This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
Thanks for your feedback. I have tried your advice but did not succeed in getting any announcement out to "my users".
I have even tried a newly installed VPS server without any extra firewall rules or other adjustments, but this seems the only part that is not working of mailu for me.
Could it be generic and no one else is using this ? Because I do not read others having this issue (only some old resolved similar issues)
I do hope it can be fixed, because I would like to start using mailu in production
I have pinpointed the start of the problem
It is this line 24 in /app/mailu/ui/views/base.py in mailu-front-1 when it goes wrong
` user.sendmail(form.announcement_subject.data, form.announcement_body.data)`
If I comment it I do not get the 502 message and the expected flask success message, but of course also not the email ;-)
There seems to be something wrong with the function I think although I am no python/flask expert
I hope someone is willing to check/verify and maybe come with a solution.
I would also appreciate hearing whether others are able to use this option successfully, or is it just me?
On a fresh 1.9 installation I cannot replicate this. Without reliable replicating steps it will be difficult to look into this.
Could you try again with a small test message? Possibly the issue is tied to the contents of the message.
This is how the SMTP logs look for a successful attempt. For every user on my test server an email is sent
```
smtp_1 | 2022-02-17T15:29:14.439769+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.442145+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.535834+00:00 967bafac904d postfix/smtpd[1613]: 82CA418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.560068+00:00 967bafac904d postfix/cleanup[1616]: 82CA418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:14.589978+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.590750+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.592501+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.595564+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.709425+00:00 967bafac904d postfix/smtpd[1613]: AD2BC18005F: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.748396+00:00 967bafac904d postfix/cleanup[1616]: AD2BC18005F: message-id=<>
smtp_1 | 2022-02-17T15:29:14.762164+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: from=<[email protected]>, size=313, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.764959+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.772441+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.775390+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.789028+00:00 967bafac904d postfix/lmtp[1617]: 82CA418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.31, delays=0.11/0.03/0.05/0.12, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> eLjxJ8ppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.803519+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: removed
smtp_1 | 2022-02-17T15:29:14.947601+00:00 967bafac904d postfix/smtpd[1613]: E705418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.965108+00:00 967bafac904d postfix/lmtp[1617]: AD2BC18005F: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.37, delays=0.17/0.04/0.01/0.15, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> YAx8MMppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.965600+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: removed
smtp_1 | 2022-02-17T15:29:14.988799+00:00 967bafac904d postfix/cleanup[1616]: E705418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:15.010396+00:00 967bafac904d postfix/qmgr[372]: E705418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:15.010783+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:15.159711+00:00 967bafac904d postfix/lmtp[1618]: E705418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.38, delays=0.23/0.05/0.01/0.08, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> ZXOoBMtpDmJxBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:15.160152+00:00 967bafac904d postfix/qmgr[372]: E705418005E: removed
```
This is the relevant code that is used for sending the emails to every user:
https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/models.py#L413
```
def sendmail(self, subject, body):
""" send an email to the address """
f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
msg = text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = f_addr
msg['To'] = to_address
smtp.sendmail(f_addr, [to_address], msg.as_string())
```
The form code https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/ui/views/base.py#L18
```
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
form = forms.AnnouncementForm()
if form.validate_on_submit():
for user in models.User.query.all():
user.sendmail(form.announcement_subject.data,
form.announcement_body.data)
# Force-empty the form
form.announcement_subject.data = ''
form.announcement_body.data = ''
flask.flash('Your announcement was sent', 'success')
return flask.render_template('announcement.html', form=form)
```
In your log we can see that a connection is made to smtp. But for some reason the email is not send. There appears to be some kind of timeout after 30 seconds. Sending the email should take less than a second.
> But for some reason the email is not send. There appears to be some kind of timeout after 30 seconds.
100% exact situation as with my post. It all looked just right, but some random timeout appeared. But I'm telling you - I knew that it worked, as I used the feature before, so I kept retrying every once in a while. And it worked. That might help debug the issue. I also researched bugs in the upstream projects and found some vaguely related stuff, but from many years ago.
> Sending the email should take less than a second.
Yeah, when it works it works in a second and makes you wonder what the fuss is about :)
As requested I tried again, but same result. My subject and body only consisted out of 3 characters so real small
`2022-02-20T10:59:17.712468+01:00 b3d7b3d95e0e postfix/smtpd[13439]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-20T10:59:17.715164+01:00 b3d7b3d95e0e postfix/smtpd[13439]: discarding EHLO keywords: PIPELINING
INFO:root:Connect
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
_DEBUG:root:Table get *_
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'domain example.com')
DEBUG:root:Request domain/example.com
DEBUG:root:Table get example.com
DEBUG:root:Table get example.com is example.com
DEBUG:root:Replying b'OK example.com'
DEBUG:root:Received bytearray(b'transport example.com')
DEBUG:root:Request transport/example.com
DEBUG:root:Table get example.com
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport [email protected]')
DEBUG:root:Request transport/[email protected]
DEBUG:root:Table get [email protected]
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
2022-02-20T10:59:49.216014+01:00 b3d7b3d95e0e postfix/smtpd[13439]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3`
The timeout (and so the wait after pressing send) occurs after the 5 line of this log (so after DEBUG:root:Table get *)
When that timeout has passed the 502 error is displayed (and the other lines logged)
I'm just a user myself FYI. And by trying again, I meant hammering it. I had my text on copy paste and tried at least two dozen times at varying hours. I have no clue how to debug this, I just said that in case someone needs to get something out urgently. I also checked the GH issues first when it happened to me and was lost, because the mailu team is overwhelmed already and no quick fix was in sight. I tried taking the containers down and up, taking other applications down temporarily to see if it was somehow exploding memory, but that made no difference. At random times it works fine.
At least mail itself worked continuously. Before the v1.8 I had a nasty bug that made me almost quit mailu (a minor update completely broke everything, and there was no way to go back one release) and it being understaffed meant all panic and no solution.
So if anyone finds this via search: hammer it. Or get all the mails via SQL from the `./data/main.db` and send normally via Thunderbird instead. Although I don't know how many mails at once TB/the stack can effectively handle, TB is a mess to begin with and wasn't meant for mass-mailing. If you do this a lot and have hundreds of users you might just want to make a little py script for now that sends them one after another.
Does the postmaster account exist?
The local part comes from POSTMASTER in mailu.env.
The domain part is from DOMAIN in mailu.env.
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Yes it does. it is set to admin in mailu.env and that is also the account I log in with for sending this. I have even tried defining the alias of postmaster for this account, but also that does not resolve it unfortunately.
I did find out though that sending a welcome mail (setting it to through in mailu.env) also fails. And yet sending and receiving mail as a normal users works....
Like this:
SUBNET=192.168.203.0/24
DOMAIN=example.com
HOSTNAMES=mail.example.com
POSTMASTER=admin
TLS_FLAVOR=letsencrypt
I do wonder if it might have to do with the /etc/postfix/master.cf file
Looking at the code it seems the smtp on port 10025 is being used.
Can someone determine if that is perhaps blocking it ? Regular smtp works fine
> bash-5.1# cat /etc/postfix/master.cf
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
# Exposed SMTP service
smtp inet n - n - - smtpd
# Internal SMTP service
10025 inet n - n - - smtpd
-o smtpd_sasl_auth_enable=yes
-o smtpd_discard_ehlo_keywords=pipelining
-o smtpd_client_restrictions=$check_ratelimit,reject_unlisted_sender,reject_authenticated_sender_login_mismatch,permit
-o smtpd_reject_unlisted_recipient=no
-o cleanup_service_name=outclean
outclean unix n - n - 0 cleanup
-o header_checks=pcre:/etc/postfix/outclean_header_filter.cf
-o nested_header_checks=
Some more detailed log from smtpd on port 10025 from the moment I try to send a public announcement you can find in the file here:
[detailed log.txt](https://github.com/Mailu/Mailu/files/8103785/detailed.log.txt)
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Hi Dimitri,
Could you please have another look at this ? I provided a lot more detailed information ...
In a similar fashion, today I've ran into the much dreaded 'auth failed' bug from more than a year ago again.
It magically works again after a while, just like the announcements do for us, but this cannot be a permanent state.
As usual the logs show nothing useful:
```
front_1 | 2022/03/05 15:29:33 [error] 13#13: *590 recv() failed (111: Connection refused) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
front_1 | 2022/03/05 15:29:33 [crit] 13#13: *590 SSL_shutdown() failed (SSL: error:14094123:SSL routines:ssl3_read_bytes:application data after close notify) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
```
I'm posting in this thread because whatever causes this random behaviour must be linked. Last time the update to 1.8 fixed it. I hate seeing this yet again..
We need reliability, and as much as I like the simplicity of mailu compared to the rest, not being able to debug these issues in a timely fashion eventually will force my hand to move to something else. Please prioritize at least critical issues like 'random' behaviour.
For the record, this is v1.8. I will try updating and hope it fixes it again like last time..
Edit: It did just like last time. 'Last time' turns out was only half a year ago.
I confirm announcements don't work for me, too.
My setup is Mailu 1.9 / docker-compose / letsencrypt flavor / no overrides for now, and the issue is exactly the one described by @metlinux.
I don't know if it is related, I also get an error 502 when I use the admin web interface to create a new user, although in this case the user is effectively created, so it's not really a problem.
EDIT: it IS related, since the "welcome" message (set to true in mailu.env) never reaches the new user's box.
Issues not for bugs, enhancement requests or discussion go stale after 21 days of inactivity. This issue will be automatically closed after 14 days.
For all metrics refer to the [stale.yml file](https://github.com/Mailu/Mailu/blob/master/.github/stale.yml).
Github issues are not meant for user support. For **user-support questions**, reach out on the [matrix support channel](https://matrix.to/#/#mailu:tedomum.net).
Mark the issue as fresh by simply adding a comment to the issue.
If this issue is safe to close, please do so now.
The message is rejected by rspamd with `forced: soft reject "Try again later"; score=nan (set by greylist)`
Maybe the private IP of the admin conatiner needs to be skipped in rspamd:
```
(default: F (soft reject): [11.40/15.00] [HFILTER_HELO_BADIP(4.50){192.168.203.6;1;},
VIOLATED_DIRECT_SPF(3.50){},MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},
ARC_NA(0.00){},ARC_SIGNED(0.00){xx.yy:s=dkim:i=1;},DMARC_NA(0.00){aa.bb;},FROM_EQ_ENVFROM(0.00){},
FROM_NO_DN(0.00){},GREYLIST(0.00){greylisted;Mon, 04 Apr 2022 09:24:05 GMT;new record;},
MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},R_DKIM_NA(0.00){},
R_SPF_SOFTFAIL(0.00){~all:c;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}])
```
The second part of the problem is not handling smtplib exceptions (smtplib.SMTPDataError) in models.py (line 425):
```
smtp.sendmail(f_addr, [to_address], msg.as_string())
=> smtplib.SMTPDataError: (451, b'4.7.1 Try again later')
```
What is odd, is that the SUBNET (192.168.203.0/24) setting (from mailu.env) should have been configured as ``local_networks = [192.168.203.0/24];`` in options.inc of rspamd.
This results in it being added as local_addrs in the rspamd config.
[This setting](https://rspamd.com/doc/configuration/options.html) configures; ``map or list of IP networks used as local, so certain checks are skipped for them (e.g. SPF checks)``.
Yet in the log excerpt we see that SPF check does take place. Rspamd ignores the setting? Or perhaps the subnet is incorrectly configured? SUBNET in mailu.env should match the subnet configured in docker-compose.yml.
If SUBNET in mailu.env matches the subnet in the docker-compose.yml file, then it should not be rejected. No SPF test takes place.
The settings of rspamd can be checked by first going into a shell and then running rspamadm configdump:
```
docker-compose exec antispam bash
rspamadm configdump | less
/options
```
In the options you should see the local_addr section with the whitelisted ips
```
options {
check_all_filters = true;
...
local_addrs [
"192.168.0.0/16",
"10.0.0.0/8",
"172.16.0.0/12",
"fd00::/8",
"169.254.0.0/16",
"fe80::/10",
"127.2.4.7",
]
```
"192.168.0.0/16", is my SUBNET setting.
As a test,
1. I added google.com as domain in the Admin WebUI.
2. Added an administrator user for the domain google.com.
3. Changed DOMAIN in mailu.env to google.com. (DOMAIN is used as sender for sending public announcements).
4. Send a public announcement. If SPF is checked, then rspamd will not allow this.
5. Result, email is send.
```
rspamd_task_write_log: id: <undef>, qid: <9CFC9C0414>, ip: 192.168.203.7, from: <[email protected]>, (default: F (no action): [3.40/15.00] [MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},ARC_NA(0.00){},FROM_EQ_ENVFROM(0.00){},FROM_NO_DN(0.00){},MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}]), len: 253, time: 63.330ms, dns req: 1, digest: <22bfa85cfd7bdb692033454ae2b44ddb>, rcpts: <[email protected]>, mime_rcpts: <[email protected]>
proxy; rspamd_protocol_http_reply: regexp statistics: 41 pcre regexps scanned, 0 regexps matched, 172 regexps total, 11 regexps cached, 1.03KiB scanned using pcre, 1.03KiB scanned total
proxy; proxy_milter_finish_handler: finished milter connection
proxy; proxy_accept_socket: accepted milter connection from 192.168.203.9 port 54560
milter; rspamd_milter_process_command: got connection from 192.168.203.7:33374
proxy; rspamd_message_parse: loaded message; id: <undef>; queue-id: <D17B5C0451>; size: 253; checksum: <22bfa85cfd7bdb692033454ae2b44ddb>
proxy; rspamd_mime_part_detect_language: detected part language: vi
lua; greylist.lua:217: skip greylisting for local networks and/or authorized users
proxy; dkim_symbol_callback: skip DKIM checks for local networks and authorized users
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_HAM of classifier bayes: not enough learns 0; 200 required
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_SPAM of classifier bayes: not enough learns 33; 200 required
proxy; rspamd_stat_classifiers_process: skip statistics as SPAM class is missing
lua; greylist.lua:331: Score too low - skip greylisting
```
Note the:
```
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
```
IMHO we should do LMTP delivery to the inboxes here, not fuss around with the filters. | 2022-11-19T17:46:58 |
|
Mailu/Mailu | 2,552 | Mailu__Mailu-2552 | [
"2231"
] | bf421092bdc5ec32d47f8bcb21119ae4f605fab0 | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -2,7 +2,6 @@
"""
import os
-import smtplib
import json
from datetime import date
@@ -412,14 +411,19 @@ def __declare_last__(cls):
def sendmail(self, subject, body):
""" send an email to the address """
- f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
- with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
- to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
- msg = text.MIMEText(body)
- msg['Subject'] = subject
- msg['From'] = f_addr
- msg['To'] = to_address
- smtp.sendmail(f_addr, [to_address], msg.as_string())
+ try:
+ f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
+ ip, port = app.config['HOST_LMTP'].rsplit(':')
+ with smtplib.LMTP(ip, port=port) as lmtp:
+ to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
+ msg = text.MIMEText(body)
+ msg['Subject'] = subject
+ msg['From'] = f_addr
+ msg['To'] = to_address
+ lmtp.sendmail(f_addr, [to_address], msg.as_string())
+ return True
+ except smtplib.SMTPException:
+ return False
@classmethod
def resolve_domain(cls, email):
diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -21,8 +21,9 @@ def announcement():
form = forms.AnnouncementForm()
if form.validate_on_submit():
for user in models.User.query.all():
- user.sendmail(form.announcement_subject.data,
- form.announcement_body.data)
+ if not user.sendmail(form.announcement_subject.data,
+ form.announcement_body.data):
+ flask.flash('Failed to send to %s' % user.email, 'error')
# Force-empty the form
form.announcement_subject.data = ''
form.announcement_body.data = ''
| Unable to send an Public Announcement
## Environment & Versions
### Environment
- docker-compose
### Versions
- version 1.9
## Description
My mailu mail server is running as I expected it. Everything is working normally besides one admin function of it.
I am not able to use the Public Announcement. Doing so results in an error 502
I am using the mailu.env and docker-compose I created online for this.
## Replication Steps
I am logged in as administrator (my account has the role of global administrator as well as manager of the domain)
I choose option announcement under Menu Administration, fill in the Announcement- subject and body and select click on send button. After 30 secs I get an "502 Bad Gateway" message from nginx and no mail is being send
I have NO overrides options defined.
## Expected behaviour
I expected a mail to be send to all users of my domain with the context of the subject and body I entered
Instead I get an nginx error 502 page.
## Logs
I have exchanged my client IP adress with aaa.bbb.ccc.ddd and adjusted the domain to example.com in the logs underneath for privacy reasons. Rest is exactly as it is stated in the logs.
mailu-admin-1:
[2022-02-13 09:53:03 +0100] [9] [CRITICAL] WORKER TIMEOUT (pid:10)
[2022-02-13 09:53:04 +0100] [9] [WARNING] Worker with pid 10 was terminated due to signal 9
[2022-02-13 09:53:04 +0100] [29] [INFO] Booting worker with pid: 29
mailu-front-1:
2022/02/13 09:53:04 [error] 8#8: *3 upstream prematurely closed connection while reading response header from upstream, client: aaa.bbb.ccc.ddd, server: , request: "POST /admin/announcement HTTP/2.0", upstream: "http://192.168.203.6:80/admin/announcement", host: "mail.example.com", referrer: "https://mail.example.com/admin/announcement"
mailu-smtp-1:
2022-02-13T09:52:33.536923+01:00 b27dd03ea1f5 postfix/smtpd[371]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-13T09:52:33.540236+01:00 b27dd03ea1f5 postfix/smtpd[371]: discarding EHLO keywords: PIPELINING
2022-02-13T09:53:04.866990+01:00 b27dd03ea1f5 postfix/smtpd[371]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3
| This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
> This happened to me as well recently. I got timeouts too. (but on v1.8) If you have to get it done urgently: try again repeatedly! It sounds counterintuitive, but in this case trying the same thing over and over actually does lead to different outcomes. I don't know why, but I got mine sent.
Thanks for your feedback. I have tried your advise but did not succeed getting any announcement out to "my users".
I have even tried a newly installed VPS server without any extra firewall rules or other adjustments, but this seems the only part that is not working of mailu for me.
Could it be generic and no one else is using this ? Because I do not read others having this issue (only some old resolved similar issues)
I do hope it can be fixed, because I would like to start using mailu in production
I have pinpointed the start of the problem
It is this line 24 in /app/mailu/ui/views/base.py in mailu-front-1 when it goes wrong
` user.sendmail(form.announcement_subject.data, form.announcement_body.data)`
If I comment it I do not get the 502 message and the expected flask success message, but of course also not the email ;-)
There seems to be something wrong with the function I think although I am no python/flask expert
I hope someone is willing to check/verify and maybe come with a solution.
I would also appreciate to hear if others are able to use this option successfull or is it just me ?
On a fresh 1.9 installation I cannot replicate this. Without reliable replicating steps it will be difficult to look into this.
Could you try again with a small test message? Possibly the issue is tied to the contents of the message.
This is how the SMTP logs look like for a successful attempt. For every user on my test server an email is send
```
smtp_1 | 2022-02-17T15:29:14.439769+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.442145+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.535834+00:00 967bafac904d postfix/smtpd[1613]: 82CA418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.560068+00:00 967bafac904d postfix/cleanup[1616]: 82CA418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:14.589978+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.590750+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.592501+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.595564+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.709425+00:00 967bafac904d postfix/smtpd[1613]: AD2BC18005F: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.748396+00:00 967bafac904d postfix/cleanup[1616]: AD2BC18005F: message-id=<>
smtp_1 | 2022-02-17T15:29:14.762164+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: from=<[email protected]>, size=313, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:14.764959+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:14.772441+00:00 967bafac904d postfix/smtpd[1613]: connect from mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.775390+00:00 967bafac904d postfix/smtpd[1613]: discarding EHLO keywords: PIPELINING
smtp_1 | 2022-02-17T15:29:14.789028+00:00 967bafac904d postfix/lmtp[1617]: 82CA418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.31, delays=0.11/0.03/0.05/0.12, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> eLjxJ8ppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.803519+00:00 967bafac904d postfix/qmgr[372]: 82CA418005E: removed
smtp_1 | 2022-02-17T15:29:14.947601+00:00 967bafac904d postfix/smtpd[1613]: E705418005E: client=mailu_admin_1.mailu_default[192.168.203.6]
smtp_1 | 2022-02-17T15:29:14.965108+00:00 967bafac904d postfix/lmtp[1617]: AD2BC18005F: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.37, delays=0.17/0.04/0.01/0.15, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> YAx8MMppDmJtBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:14.965600+00:00 967bafac904d postfix/qmgr[372]: AD2BC18005F: removed
smtp_1 | 2022-02-17T15:29:14.988799+00:00 967bafac904d postfix/cleanup[1616]: E705418005E: message-id=<>
smtp_1 | 2022-02-17T15:29:15.010396+00:00 967bafac904d postfix/qmgr[372]: E705418005E: from=<[email protected]>, size=314, nrcpt=1 (queue active)
smtp_1 | 2022-02-17T15:29:15.010783+00:00 967bafac904d postfix/smtpd[1613]: disconnect from mailu_admin_1.mailu_default[192.168.203.6] ehlo=1 mail=1 rcpt=1 data=1 quit=1 commands=5
smtp_1 | 2022-02-17T15:29:15.159711+00:00 967bafac904d postfix/lmtp[1618]: E705418005E: to=<[email protected]>, relay=192.168.203.9[192.168.203.9]:2525, delay=0.38, delays=0.23/0.05/0.01/0.08, dsn=2.0.0, status=sent (250 2.0.0 <[email protected]> ZXOoBMtpDmJxBQAAMm7Psw Saved)
smtp_1 | 2022-02-17T15:29:15.160152+00:00 967bafac904d postfix/qmgr[372]: E705418005E: removed
```
This is the relevant code that is used for sending the emails to every user:
https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/models.py#L413
```
def sendmail(self, subject, body):
""" send an email to the address """
f_addr = f'{app.config["POSTMASTER"]}@{idna.encode(app.config["DOMAIN"]).decode("ascii")}'
with smtplib.SMTP(app.config['HOST_AUTHSMTP'], port=10025) as smtp:
to_address = f'{self.localpart}@{idna.encode(self.domain_name).decode("ascii")}'
msg = text.MIMEText(body)
msg['Subject'] = subject
msg['From'] = f_addr
msg['To'] = to_address
smtp.sendmail(f_addr, [to_address], msg.as_string())
```
The form code https://github.com/Mailu/Mailu/blob/b73963aae5177770c64c8e04be7c11b00317f0a5/core/admin/mailu/ui/views/base.py#L18
```
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
form = forms.AnnouncementForm()
if form.validate_on_submit():
for user in models.User.query.all():
user.sendmail(form.announcement_subject.data,
form.announcement_body.data)
# Force-empty the form
form.announcement_subject.data = ''
form.announcement_body.data = ''
flask.flash('Your announcement was sent', 'success')
return flask.render_template('announcement.html', form=form)
```
In your log we can see that a connection is made to smtp. But for some reason the email is not send. There appears to be some kind of timeout after 30 seconds. Sending the email should take less than a second.
> But for some reason the email is not send. There appears to be some kind of timeout after 30 seconds.
100% exact situation as with my post. It all looked just right, but some random timeout appeared. But I'm telling you - I knew that it worked, as I used the feature before, so I kept retrying every once in a while. And it worked. That might help debug the issue. I also researched bugs in the upstream projects and found some vaguely related stuff, but from many years ago.
> Sending the email should take less than a second.
Yeah, when it works it works in a second and makes you wonder what the fuss is about :)
As requested I tried again, but same result. My subject and body only consisted out of 3 characters so real small
`2022-02-20T10:59:17.712468+01:00 b3d7b3d95e0e postfix/smtpd[13439]: connect from mailu-admin-1.mailu_default[192.168.203.6]
2022-02-20T10:59:17.715164+01:00 b3d7b3d95e0e postfix/smtpd[13439]: discarding EHLO keywords: PIPELINING
INFO:root:Connect
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
_DEBUG:root:Table get *_
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'domain example.com')
DEBUG:root:Request domain/example.com
DEBUG:root:Table get example.com
DEBUG:root:Table get example.com is example.com
DEBUG:root:Replying b'OK example.com'
DEBUG:root:Received bytearray(b'transport example.com')
DEBUG:root:Request transport/example.com
DEBUG:root:Table get example.com
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport [email protected]')
DEBUG:root:Request transport/[email protected]
DEBUG:root:Table get [email protected]
DEBUG:root:Replying b'NOTFOUND '
DEBUG:root:Received bytearray(b'transport *')
DEBUG:root:Request transport/*
DEBUG:root:Table get *
DEBUG:root:Replying b'NOTFOUND '
2022-02-20T10:59:49.216014+01:00 b3d7b3d95e0e postfix/smtpd[13439]: disconnect from mailu-admin-1.mailu_default[192.168.203.6] ehlo=1 mail=1 quit=1 commands=3`
The timeout (and so the wait after pressing send) occurs after the 5 line of this log (so after DEBUG:root:Table get *)
When that timeout has passed the 502 error is displayed (and the other lines logged)
I'm just a user myself FYI. And by trying again, I meant hammering it. I had my text on copy paste and tried at least two dozen times at varying hours. I have no clue how to debug this, I just said that in case someone needs to get something out urgently. I also checked the GH issues first when it happened to me and was lost, because the mailu team is overwhelmed already and no quick fix was in sight. I tried taking the containers down and up, taking other applications down temporarily to see if it was somehow exploding memory, but that made no difference. At random times it works fine.
At least mail itself worked continuously. Before the v1.8 I had a nasty bug that made me almost quit mailu (a minor update completely broke everything, and there was no way to go back one release) and it being understaffed meant all panic and no solution.
So if anyone finds this via search: hammer it. Or get all the mails via SQL from the `./data/main.db` and send normally via Thunderbird instead. Although I don't know how many mails at once TB/the stack can effectively handle, TB is a mess to begin with and wasn't meant for mass-mailing. If you do this a lot and have hundreds of users you might just want to make a little py script for now that sends them one after another.
Does the postmaster account exist?
The local part comes from POSTMASTER in mailu.env.
The domain part is from DOMAIN in mailu.env.
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Yes it does. it is set to admin in mailu.env and that is also the account I log in with for sending this. I have even tried defining the alias of postmaster for this account, but also that does not resolve it unfortunately.
I did find out though that sending a welcome mail (setting it to through in mailu.env) also fails. And yet sending and receiving mail as a normal users works....
Like this:
SUBNET=192.168.203.0/24
DOMAIN=example.com
HOSTNAMES=mail.example.com
POSTMASTER=admin
TLS_FLAVOR=letsencrypt
I do wonder if it might have to do with the /etc/postfix/master.cf file
Looking at the code it seems the smtp on port 10025 is being used.
Can someone determine if that is perhaps blocking it ? Regular smtp works fine
> bash-5.1# cat /etc/postfix/master.cf
# service type private unpriv chroot wakeup maxproc command + args
# (yes) (yes) (yes) (never) (100)
# Exposed SMTP service
smtp inet n - n - - smtpd
# Internal SMTP service
10025 inet n - n - - smtpd
-o smtpd_sasl_auth_enable=yes
-o smtpd_discard_ehlo_keywords=pipelining
-o smtpd_client_restrictions=$check_ratelimit,reject_unlisted_sender,reject_authenticated_sender_login_mismatch,permit
-o smtpd_reject_unlisted_recipient=no
-o cleanup_service_name=outclean
outclean unix n - n - 0 cleanup
-o header_checks=pcre:/etc/postfix/outclean_header_filter.cf
-o nested_header_checks=
Some more detailed log from smtpd on port 10025 from the moment I try to send a public announcement you can find in the file here:
[detailed log.txt](https://github.com/Mailu/Mailu/files/8103785/detailed.log.txt)
> Does the postmaster account exist? The local part comes from POSTMASTER in mailu.env. The domain part is from DOMAIN in mailu.env.
Hi Dimitri,
Could you please have another look at this ? I provided a lot more detailed information ...
In a similar fashion, today I've ran into the much dreaded 'auth failed' bug from more than a year ago again.
It magically works again after a while, just like the announcements do for us, but this cannot be a permanent state.
As usual the logs show nothing useful:
```
front_1 | 2022/03/05 15:29:33 [error] 13#13: *590 recv() failed (111: Connection refused) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
front_1 | 2022/03/05 15:29:33 [crit] 13#13: *590 SSL_shutdown() failed (SSL: error:14094123:SSL routines:ssl3_read_bytes:application data after close notify) while reading response from upstream, client: 1.2.3.4, server: 0.0.0.0:465, login: "[email protected]", upstream: 192.168.203.8:10025
```
I'm posting in this thread because whatever causes this random behaviour must be linked. Last time the update to 1.8 fixed it. I hate seeing this yet again..
We need reliability, and as much as I like the simplicity of mailu compared to the rest, not being able to debug these issues in a timely fashion eventually will force my hand to move to something else. Please prioritize at least critical issues like 'random' behaviour.
For the record, this is v1.8. I will try updating and hope it fixes it again like last time..
Edit: It did just like last time. 'Last time' turns out was only half a year ago.
I confirm announcements don't work for me, too.
My setup is Mailu 1.9 / docker-compose / letsencrypt flavor / no overrides for now, and the issue is exactly the one described by @metlinux.
I don't know if it is related, I also get an error 502 when I use the admin web interface to create a new user, although in this case the user is effectively created, so it's not really a problem.
EDIT: it IS related, since the "welcome" message (set to true in mailu.env) never reaches the new user's box.
Issues not for bugs, enhancement requests or discussion go stale after 21 days of inactivity. This issue will be automatically closed after 14 days.
For all metrics refer to the [stale.yml file](https://github.com/Mailu/Mailu/blob/master/.github/stale.yml).
Github issues are not meant for user support. For **user-support questions**, reach out on the [matrix support channel](https://matrix.to/#/#mailu:tedomum.net).
Mark the issue as fresh by simply adding a comment to the issue.
If this issue is safe to close, please do so now.
The message is rejected by rspamd with `forced: soft reject "Try again later"; score=nan (set by greylist)`
Maybe the private IP of the admin container needs to be skipped in rspamd:
```
(default: F (soft reject): [11.40/15.00] [HFILTER_HELO_BADIP(4.50){192.168.203.6;1;},
VIOLATED_DIRECT_SPF(3.50){},MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},
ARC_NA(0.00){},ARC_SIGNED(0.00){xx.yy:s=dkim:i=1;},DMARC_NA(0.00){aa.bb;},FROM_EQ_ENVFROM(0.00){},
FROM_NO_DN(0.00){},GREYLIST(0.00){greylisted;Mon, 04 Apr 2022 09:24:05 GMT;new record;},
MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},R_DKIM_NA(0.00){},
R_SPF_SOFTFAIL(0.00){~all:c;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}])
```
The second part of the problem is not handling smtplib exceptions (smtplib.SMTPDataError) in models.py (line 425):
```
smtp.sendmail(f_addr, [to_address], msg.as_string())
=> smtplib.SMTPDataError: (451, b'4.7.1 Try again later')
```
What is odd, is that the SUBNET (192.168.203.0/24) setting (from mailu.env) should have been configured as ``local_networks = [192.168.203.0/24];`` in options.inc of rspamd.
This results in it being added as local_addrs in the rspamd config.
[This setting](https://rspamd.com/doc/configuration/options.html) configures; ``map or list of IP networks used as local, so certain checks are skipped for them (e.g. SPF checks)``.
Yet in the log excerpt we see that SPF check does take place. Rspamd ignores the setting? Or perhaps the subnet is incorrectly configured? SUBNET in mailu.env should match the subnet configured in docker-compose.yml.
If SUBNET in mailu.env matches the subnet in the docker-compose.yml file, then it should not be rejected. No SPF test takes place.
The settings of rspamd can be checked by first going into a shell and then running rspamadm configdump:
```
docker-compose exec antispam bash
rspamadm configdump | less
/options
```
In the options you should see the local_addr section with the whitelisted ips
```
options {
check_all_filters = true;
...
local_addrs [
"192.168.0.0/16",
"10.0.0.0/8",
"172.16.0.0/12",
"fd00::/8",
"169.254.0.0/16",
"fe80::/10",
"127.2.4.7",
]
```
"192.168.0.0/16", is my SUBNET setting.
As a test,
1. I added google.com as domain in the Admin WebUI.
2. Added an administrator user for the domain google.com.
3. Changed DOMAIN in mailu.env to google.com. (DOMAIN is used as sender for sending public announcements).
4. Send a public announcement. If SPF is checked, then rspamd will not allow this.
5. Result: email is sent.
```
rspamd_task_write_log: id: <undef>, qid: <9CFC9C0414>, ip: 192.168.203.7, from: <[email protected]>, (default: F (no action): [3.40/15.00] [MISSING_MID(2.50){},MISSING_DATE(1.00){},MIME_GOOD(-0.10){text/plain;},ARC_NA(0.00){},FROM_EQ_ENVFROM(0.00){},FROM_NO_DN(0.00){},MIME_TRACE(0.00){0:+;},RCPT_COUNT_ONE(0.00){1;},RCVD_COUNT_ZERO(0.00){0;},TO_DN_NONE(0.00){},TO_MATCH_ENVRCPT_ALL(0.00){}]), len: 253, time: 63.330ms, dns req: 1, digest: <22bfa85cfd7bdb692033454ae2b44ddb>, rcpts: <[email protected]>, mime_rcpts: <[email protected]>
proxy; rspamd_protocol_http_reply: regexp statistics: 41 pcre regexps scanned, 0 regexps matched, 172 regexps total, 11 regexps cached, 1.03KiB scanned using pcre, 1.03KiB scanned total
proxy; proxy_milter_finish_handler: finished milter connection
proxy; proxy_accept_socket: accepted milter connection from 192.168.203.9 port 54560
milter; rspamd_milter_process_command: got connection from 192.168.203.7:33374
proxy; rspamd_message_parse: loaded message; id: <undef>; queue-id: <D17B5C0451>; size: 253; checksum: <22bfa85cfd7bdb692033454ae2b44ddb>
proxy; rspamd_mime_part_detect_language: detected part language: vi
lua; greylist.lua:217: skip greylisting for local networks and/or authorized users
proxy; dkim_symbol_callback: skip DKIM checks for local networks and authorized users
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_HAM of classifier bayes: not enough learns 0; 200 required
proxy; rspamd_redis_connected: skip obtaining bayes tokens for BAYES_SPAM of classifier bayes: not enough learns 33; 200 required
proxy; rspamd_stat_classifiers_process: skip statistics as SPAM class is missing
lua; greylist.lua:331: Score too low - skip greylisting
```
Note the:
```
lua; spf.lua:186: skip SPF checks for local networks and authorized users
lua; dmarc.lua:349: skip DMARC checks as either SPF or DKIM were not checked
lua; once_received.lua:99: Skipping once_received for authenticated user or local network
```
IMHO we should do LMTP delivery to the inboxes here, not fuss around with the filters. | 2022-11-23T09:41:44 |
|
Mailu/Mailu | 2,563 | Mailu__Mailu-2563 | [
"1483",
"1483"
] | b49d9ce2437680828478311de5fc2e3a7b838577 | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -14,6 +14,8 @@
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
+os.system("flock -n /queue/pid/master.pid rm /queue/pid/master.pid")
+
def start_podop():
os.setuid(getpwnam('postfix').pw_uid)
os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)
| postfix throttling afer a hard shutdown
After a hard shutdown, I noticed that postfix would not restart because master.pid is found in /queue/pid/master.pid
We should check and remove this file during container start up (start.py)
postfix throttling afer a hard shutdown
After a hard shutdown, I noticed that postfix would not restart because master.pid is found in /queue/pid/master.pid
We should check and remove this file during container start up (start.py)
It is quite hard to reproduce though!
Most likely an issue related to my nfs share set-up.
Closing now.
I reopen this due to https://github.com/Mailu/helm-charts/issues/54. It seems to affect others as well.
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who wants to work on them – and which issues may be less important. These less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
It is quite hard to reproduce though!
Most likely an issue related to my nfs share set-up.
Closing now.
I reopen this due to https://github.com/Mailu/helm-charts/issues/54. It seems to affect others as well.
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who wants to work on them – and which issues may be less important. These less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out! | 2022-11-27T08:57:46 |
|
Mailu/Mailu | 2,566 | Mailu__Mailu-2566 | [
"1820"
] | 03ff2f21324e609d3ab433410f73e89972951541 | diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py
--- a/core/admin/mailu/manage.py
+++ b/core/admin/mailu/manage.py
@@ -304,6 +304,7 @@ def config_update(verbose=False, delete_objects=False):
if verbose:
print(f'Deleting domain: {domain.name}')
db.session.delete(domain)
+
db.session.commit()
@@ -351,7 +352,7 @@ def config_import(verbose=0, secrets=False, debug=False, quiet=False, color=Fals
raise click.ClickException(msg) from exc
raise
- # don't commit when running dry
+ # do not commit when running dry
if dry_run:
log.changes('Dry run. Not committing changes.')
db.session.rollback()
@@ -403,13 +404,16 @@ def config_export(full=False, secrets=False, color=False, dns=False, output=None
@mailu.command()
@click.argument('email')
[email protected]('-r', '--really', is_flag=True)
@with_appcontext
-def user_delete(email):
- """delete user"""
- user = models.User.query.get(email)
- if user:
- db.session.delete(user)
- db.session.commit()
+def user_delete(email, really=False):
+ """disable or delete user"""
+ if user := models.User.query.get(email):
+ if really:
+ db.session.delete(user)
+ else:
+ user.enabled = False
+ db.session.commit()
@mailu.command()
@@ -417,10 +421,9 @@ def user_delete(email):
@with_appcontext
def alias_delete(email):
"""delete alias"""
- alias = models.Alias.query.get(email)
- if alias:
+ if alias := models.Alias.query.get(email):
db.session.delete(alias)
- db.session.commit()
+ db.session.commit()
@mailu.command()
diff --git a/core/admin/mailu/ui/views/users.py b/core/admin/mailu/ui/views/users.py
--- a/core/admin/mailu/ui/views/users.py
+++ b/core/admin/mailu/ui/views/users.py
@@ -80,19 +80,6 @@ def user_edit(user_email):
domain=user.domain, max_quota_bytes=max_quota_bytes)
[email protected]('/user/delete/<path:user_email>', methods=['GET', 'POST'])
[email protected]_admin(models.User, 'user_email')
[email protected]_required("delete {user_email}")
-def user_delete(user_email):
- user = models.User.query.get(user_email) or flask.abort(404)
- domain = user.domain
- models.db.session.delete(user)
- models.db.session.commit()
- flask.flash('User %s deleted' % user)
- return flask.redirect(
- flask.url_for('.user_list', domain_name=domain.name))
-
-
@ui.route('/user/settings', methods=['GET', 'POST'], defaults={'user_email': None})
@ui.route('/user/usersettings/<path:user_email>', methods=['GET', 'POST'])
@access.owner(models.User, 'user_email')
| [Enhancement] List of previously used addresses/aliases to block accidental reuse and privacy implications
I did search for existing issues first. Not quite sure how this feature would be worded so I may have missed a discussion.
I propose a simple feature, definitely easily overrideable if need be and with a toggle to turn it off, that permanently keeps track of used addresses so you cannot accidentally reuse a previously used one and leak data.
Address reuse is a problem for obvious reasons:
- intermingling different identities
- therefore leaking private information
Storing the addresses can be done privacy friendly: simply store exclusively the address itself, no metadata.
On a side note: Deleting accounts does not delete the actual folders. I am doing this manually so far, but it'd be nice if *delete* actually meant *delete*.
As usual I have to praise how well Mailu works in general. I realized I've been using it for years now, and apart from some inconveniences like this or #1700 #1328 there hasn't been an issue, and it's more lightweight than the competitors. I wish you all the best!
If I understand you correctly, deleting a user in the web administration GUI does not delete the mail folders of the user.
When you create a new user with the same user name (same email address), then this user sees the mail box of the deleted user. For me this sounds like a bug.
The enhancement request is clear for me. This should be configurable via the GUI or via mailu.env.
Yes, it does not delete the folders/data.
I did not check if you would see the content of the previous user, that'd be quite a grave bug indeed. I simply meant reusing addresses means that addresses could in the future receive data meant for the previous user. That's why pretty much every mailprovider blocks it.
I suggest:
- having an option to keep a list of all addresses that ever existed
- when creating a new inbox that matches, showing a warning, but having an option to override this as domain administrator if need be (deleting and recreating postmaster@ should be easy for instance)
Mailu still does not remove deleted user data, or block/warn of address reusage for the record. You have to keep your own list and delete the folder in `/mail`.
Edit: Here it leaves traces as well:
`mailu/webmail/_data_/_default_/storage/cfg` | 2022-11-27T13:16:31 |
|
Mailu/Mailu | 2,568 | Mailu__Mailu-2568 | [
"2451"
] | a366116cae2597bd6d91fe5e3826ccaf3d891bc1 | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -26,12 +26,14 @@
}),
}
+WEBMAIL_PORTS = ['10143', '10025']
+
def check_credentials(user, password, ip, protocol=None, auth_port=None):
- if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
+ if not user or not user.enabled or (protocol == "imap" and not user.enable_imap and not auth_port in WEBMAIL_PORTS) or (protocol == "pop3" and not user.enable_pop):
return False
is_ok = False
# webmails
- if auth_port in ['10143', '10025'] and password.startswith('token-'):
+ if auth_port in WEBMAIL_PORTS and password.startswith('token-'):
if utils.verify_temp_token(user.get_id(), password):
is_ok = True
# All tokens are 32 characters hex lowercase
| not allowing POP3/IMAP leads to infinite loop in webmail
v1.9.32
I noticed a small bug. If both are disabled, webmail is stuck in an infinite loop. I guess nobody ever tried it before since both are checked by default.
Not very consequential, but I figured you might want to know. Not sure about the use case either. I unchecked them because there was no need for this particular account and found it that way.
Cheers
| Thanks. Logs show this (over and over again):
```
mailu-front-1 | * - - [14/Sep/2022:13:46:43 +0000] "GET /webmail/sso.php HTTP/2.0" 302 0 "https://domain.fqdn/sso/login" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:104.0) Gecko/20100101 Firefox/104.0"
mailu-front-1 | 2022/09/14 13:46:43 [info] 10#10: *1372 client 192.168.203.10:38486 connected to 0.0.0.0:10143
mailu-front-1 | 2022/09/14 13:46:43 [info] 10#10: *1372 client login failed: "Authentication credentials invalid" while in http auth state, client: 192.168.203.10, server: 0.0.0.0:10143, login: "[email protected]"
``` | 2022-11-29T12:30:38 |
|
Mailu/Mailu | 2,569 | Mailu__Mailu-2569 | [
"2451"
] | 139c3d36c4688653cb0db8ce897b8868baa95064 | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -27,12 +27,14 @@
}),
}
+WEBMAIL_PORTS = ['10143', '10025']
+
def check_credentials(user, password, ip, protocol=None, auth_port=None):
- if not user or not user.enabled or (protocol == "imap" and not user.enable_imap) or (protocol == "pop3" and not user.enable_pop):
+ if not user or not user.enabled or (protocol == "imap" and not user.enable_imap and not auth_port in WEBMAIL_PORTS) or (protocol == "pop3" and not user.enable_pop):
return False
is_ok = False
# webmails
- if auth_port in ['10143', '10025'] and password.startswith('token-'):
+ if auth_port in WEBMAIL_PORTS and password.startswith('token-'):
if utils.verify_temp_token(user.get_id(), password):
is_ok = True
# All tokens are 32 characters hex lowercase
| not allowing POP3/IMAP leads to infinite loop in webmail
v1.9.32
I noticed a small bug. If both are disabled, webmail is stuck in an infinite loop. I guess nobody ever tried it before since both are checked by default.
Not very consequential, but I figured you might want to know. Not sure about the use case either. I unchecked them because there was no need for this particular account and found it that way.
Cheers
| Thanks. Logs show this (over and over again):
```
mailu-front-1 | * - - [14/Sep/2022:13:46:43 +0000] "GET /webmail/sso.php HTTP/2.0" 302 0 "https://domain.fqdn/sso/login" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:104.0) Gecko/20100101 Firefox/104.0"
mailu-front-1 | 2022/09/14 13:46:43 [info] 10#10: *1372 client 192.168.203.10:38486 connected to 0.0.0.0:10143
mailu-front-1 | 2022/09/14 13:46:43 [info] 10#10: *1372 client login failed: "Authentication credentials invalid" while in http auth state, client: 192.168.203.10, server: 0.0.0.0:10143, login: "[email protected]"
``` | 2022-11-30T08:11:56 |
|
Mailu/Mailu | 2,601 | Mailu__Mailu-2601 | [
"2493"
] | e9175da5861ee5179f13b16bff7c81af7e8d9b31 | diff --git a/core/admin/mailu/schemas.py b/core/admin/mailu/schemas.py
--- a/core/admin/mailu/schemas.py
+++ b/core/admin/mailu/schemas.py
@@ -909,16 +909,23 @@ def _patch_item(self, data, many, **kwargs): # pylint: disable=unused-argument
# stabilize import of auto-increment primary keys (not required),
# by matching import data to existing items and setting primary key
if not self._primary in data:
- for item in getattr(self.recall('parent'), self.recall('field', 'parent')):
- existing = self.dump(item, many=False)
- this = existing.pop(self._primary)
- if data == existing:
- instance = item
- data[self._primary] = this
- break
+ parent = self.recall('parent')
+ if parent is not None:
+ for item in getattr(parent, self.recall('field', 'parent')):
+ existing = self.dump(item, many=False)
+ this = existing.pop(self._primary)
+ if data == existing:
+ instance = item
+ data[self._primary] = this
+ break
# try to load instance
instance = self.instance or self.get_instance(data)
+
+ # remember instance as parent for pruning siblings
+ if not self.Meta.sibling and self.context.get('update'):
+ self.store('parent', instance)
+
if instance is None:
if '__delete__' in data:
@@ -931,9 +938,6 @@ def _patch_item(self, data, many, **kwargs): # pylint: disable=unused-argument
else:
if self.context.get('update'):
- # remember instance as parent for pruning siblings
- if not self.Meta.sibling:
- self.store('parent', instance)
# delete instance from session when marked
if '__delete__' in data:
self.opts.sqla_session.delete(instance)
@@ -1014,14 +1018,16 @@ def _prune_items(self, items, many, **kwargs): # pylint: disable=unused-argument
del_items = True
if add_items or del_items:
- existing = {item[self._primary] for item in items if self._primary in item}
- for item in getattr(self.recall('parent'), self.recall('field', 'parent')):
- key = getattr(item, self._primary)
- if key not in existing:
- if add_items:
- items.append({self._primary: key})
- else:
- items.append({self._primary: key, '__delete__': '?'})
+ parent = self.recall('parent')
+ if parent is not None:
+ existing = {item[self._primary] for item in items if self._primary in item}
+ for item in getattr(parent, self.recall('field', 'parent')):
+ key = getattr(item, self._primary)
+ if key not in existing:
+ if add_items:
+ items.append({self._primary: key})
+ else:
+ items.append({self._primary: key, '__delete__': '?'})
return items
| config-import [KeyError] 'id'
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [ ] Check if no issue or pull-request for this already exists.
- [ ] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [ ] You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- [ ] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
```
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
```
version 1.9
## Description
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
Receiving Error: [KeyError] 'id' when trying to import configuration with fetches.
## Replication Steps
Steps for replicating your issue
docker exec -i $(docker-compose ps -q admin) flask mailu config-import -uv < import-user.yml
domain:
- name: example.com
user:
- email: [email protected]
displayed_name: 'Test'
enable_imap: true
enable_pop: false
enabled: true
fetches:
- comment: ''
error: null
host: 0.0.0.0
keep: true
last_check: '2022-10-25T14:26:20.876898Z'
password: 'test'
port: 993
protocol: imap
tls: true
username: [email protected]
password: 'test'
hash_password: false
quota_bytes: 10000000
spam_enabled: true
spam_mark_as_read: false
## Expected behaviour
Explain what results you expected - be as specific as possible. Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
The expected behaviour is to be able to create a new user while fetching another mailbox.
## Logs
Often it is very useful to include log fragments of the involved component. You can get the logs via `docker logs <container name> --tail 1000`. For example for the admin container:
`docker logs mailu_admin_1 --tail 1000`
or using docker-compose `docker-compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
````markdown
```
Your logs here!
```
````
| You can just add an id to work around this: `id: 1`
I'll see if it is possible to create a new id if it's missing.
The id is needed to know if you want to add or replace a new item.
Thanks for the reply, when I add id: 1.
I'm receiving the following error:
Error: [AttributeError] 'User' object has no attribute 'id'
You need to add the id to the fetch item, not the user.
Please just create a user with a fetch using the web interface and export your configuration using "config-export" to see how it should look like.
Hi,
I have exported the configuration, and still receiving the same error:
Error: [AttributeError] 'User' object has no attribute 'id'
In the example you can see that the id is already in the fetch item:
---
domain:
-
name: example.com
user:
-
displayed_name: Test
email: [email protected]
enable_imap: true
enable_pop: false
enabled: true
fetches:
-
comment: ""
error: ~
host: "example"
id: 3
keep: true
last_check: ~
password: example
port: 993
protocol: imap
tls: false
username: [email protected]
forward_destination: []
forward_enabled: false
forward_keep: true
global_admin: false
manager_of: []
password: "example"
quota_bytes: 1000000000
reply_body: ~
reply_enabled: false
reply_enddate: "2999-12-31"
reply_startdate: "1900-01-01"
reply_subject: ~
spam_enabled: true
spam_mark_as_read: true
spam_threshold: 80
| 2022-12-27T11:54:40 |
|
Mailu/Mailu | 2,603 | Mailu__Mailu-2603 | [
"1364"
] | c729954b4ae4507cb689d512622365d9286e660c | diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -17,7 +17,7 @@ def dovecot_passdb_dict(user_email):
return flask.jsonify({
"password": None,
"nopassword": "Y",
- "allow_nets": ",".join(allow_nets)
+ "allow_real_nets": ",".join(allow_nets)
})
@internal.route("/dovecot/userdb/")
| Maximum number of connections from user+IP exceeded
Hi, we have a problem... :-)
We have changed the original value of "AUTH_RATELIMIT" to "AUTH_RATELIMIT=100/minute;6000/hour", but logs continue to say " Maximum number of connections from user+IP exceeded (mail_max_userip_connections=20)" while reading response from upstream..."
We have made docker-compose dow and docker-compose up -d, but without result.
How can we change the default limit set during the installation?
Thanks in advance.
| That is not related to the ratelimit but to a dovecot limit that allows so many connections per user and originating ip.
Either you indeed have that many clients connected to your server something is wrong and dovecot has open connections hanging
You can override this value by creating a file
`/mailu/overrides/dovecot.conf`
with the following content:
```
protocol imap {
mail_max_userip_connections = 100
}
```
Hi! Thank you very much for your replay... Overriding the value worked very well.
We have another two questions... And we don't know if we can ask here or if we need to open another thread.
We have a problem with a request to sended mail (request a read receipt and request a delivery receipt).
In one account, when the recipient attempts to send a read receipt, they get this error message: "Recipient address rejected: User unknown in virtual mailbox table"
The delivery request don't say nothing, but the request don't return.
Another account don't have a read error and request a read receipt it works, but a request to delivery don't works and don't say nothing.
How we can resolve this problem?
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who wants to work on them – and which issues may be less important. These less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
Hi There,
we see this issue had only little attention π. As much as it pains us: In order to get the current issues a bit more manageable for us, we decided to close it. ππ
We hope that no one feels offended by doing so. Should the issue get really pressing in the future, please feel free to re-open it.
Thank you for your patience and understanding, π
- Your Mailu Team
Perhaps a few organizations struck this limit or simply understood the actual cause of a problem (:
Say, **Thunderbird asks for a password** on such limit errors and says nothing about the original error message. Mailspring does show the original problem and that magical number `20`. As people fetch mail, come to work and go home, this limit appears and disappears, becoming seemingly stochastic and driving suspicion away from the server to clients as other accounts work.
We do have such a mailbox that exceeds the number of clients.
|
Mailu/Mailu | 2,604 | Mailu__Mailu-2604 | [
"2493"
] | 151601744f4d73baf5aab6c959dce6dc2d04dbd4 | diff --git a/core/admin/mailu/schemas.py b/core/admin/mailu/schemas.py
--- a/core/admin/mailu/schemas.py
+++ b/core/admin/mailu/schemas.py
@@ -5,6 +5,7 @@
from collections import Counter
from datetime import timezone
+import inspect
import json
import logging
import yaml
@@ -669,20 +670,15 @@ class Storage:
context = {}
- def _bind(self, key, bind):
- if bind is True:
- return (self.__class__, key)
- if isinstance(bind, str):
- return (get_schema(self.recall(bind).__class__), key)
- return (bind, key)
-
- def store(self, key, value, bind=None):
+ def store(self, key, value):
""" store value under key """
- self.context.setdefault('_track', {})[self._bind(key, bind)]= value
+ key = f'{self.__class__.__name__}.{key}'
+ self.context.setdefault('_track', {})[key] = value
- def recall(self, key, bind=None):
+ def recall(self, key):
""" recall value from key """
- return self.context['_track'][self._bind(key, bind)]
+ key = f'{self.__class__.__name__}.{key}'
+ return self.context['_track'][key]
class BaseOpts(SQLAlchemyAutoSchemaOpts):
""" Option class with sqla session
@@ -790,10 +786,16 @@ def hide(self, data):
for key, value in data.items()
}
- def _call_and_store(self, *args, **kwargs):
- """ track current parent field for pruning """
- self.store('field', kwargs['field_name'], True)
- return super()._call_and_store(*args, **kwargs)
+ def get_parent(self):
+ """ helper to determine parent of current object """
+ for x in inspect.stack():
+ loc = x[0].f_locals
+ if 'ret_d' in loc:
+ if isinstance(loc['self'], MailuSchema):
+ return self.context.get('config'), loc['attr_name']
+ else:
+ return loc['self'].get_instance(loc['ret_d']), loc['attr_name']
+ return None, None
# this is only needed to work around the declared attr "email" primary key in model
def get_instance(self, data):
@@ -803,9 +805,13 @@ def get_instance(self, data):
if keys := getattr(self.Meta, 'primary_keys', None):
filters = {key: data.get(key) for key in keys}
if None not in filters.values():
- res= self.session.query(self.opts.model).filter_by(**filters).first()
- return res
- res= super().get_instance(data)
+ try:
+ res = self.session.query(self.opts.model).filter_by(**filters).first()
+ except sqlalchemy.exc.StatementError as exc:
+ raise ValidationError(f'Invalid {keys[0]}: {data.get(keys[0])!r}', data.get(keys[0])) from exc
+ else:
+ return res
+ res = super().get_instance(data)
return res
@pre_load(pass_many=True)
@@ -829,6 +835,10 @@ def _patch_many(self, items, many, **kwargs): # pylint: disable=unused-argument
want_prune = []
def patch(count, data):
+ # we only process objects here
+ if type(data) is not dict:
+ raise ValidationError(f'Invalid item. {self.Meta.model.__tablename__.title()} needs to be an object.', f'{data!r}')
+
# don't allow __delete__ coming from input
if '__delete__' in data:
raise ValidationError('Unknown field.', f'{count}.__delete__')
@@ -882,10 +892,10 @@ def set_default(key, value):
]
# remember if prune was requested for _prune_items@post_load
- self.store('prune', bool(want_prune), True)
+ self.store('prune', bool(want_prune))
# remember original items to stabilize password-changes in _add_instance@post_load
- self.store('original', items, True)
+ self.store('original', items)
return items
@@ -909,23 +919,18 @@ def _patch_item(self, data, many, **kwargs): # pylint: disable=unused-argument
# stabilize import of auto-increment primary keys (not required),
# by matching import data to existing items and setting primary key
if not self._primary in data:
- parent = self.recall('parent')
+ parent, field = self.get_parent()
if parent is not None:
- for item in getattr(parent, self.recall('field', 'parent')):
+ for item in getattr(parent, field):
existing = self.dump(item, many=False)
this = existing.pop(self._primary)
if data == existing:
- instance = item
+ self.instance = item
data[self._primary] = this
break
# try to load instance
instance = self.instance or self.get_instance(data)
-
- # remember instance as parent for pruning siblings
- if not self.Meta.sibling and self.context.get('update'):
- self.store('parent', instance)
-
if instance is None:
if '__delete__' in data:
@@ -1001,7 +1006,7 @@ def _prune_items(self, items, many, **kwargs): # pylint: disable=unused-argument
return items
# get prune flag from _patch_many@pre_load
- want_prune = self.recall('prune', True)
+ want_prune = self.recall('prune')
# prune: determine if existing items in db need to be added or marked for deletion
add_items = False
@@ -1018,16 +1023,17 @@ def _prune_items(self, items, many, **kwargs): # pylint: disable=unused-argument
del_items = True
if add_items or del_items:
- parent = self.recall('parent')
+ parent, field = self.get_parent()
if parent is not None:
existing = {item[self._primary] for item in items if self._primary in item}
- for item in getattr(parent, self.recall('field', 'parent')):
+ for item in getattr(parent, field):
key = getattr(item, self._primary)
if key not in existing:
if add_items:
items.append({self._primary: key})
else:
- items.append({self._primary: key, '__delete__': '?'})
+ if self.context.get('update'):
+ self.opts.sqla_session.delete(self.instance or self.get_instance({self._primary: key}))
return items
@@ -1048,7 +1054,7 @@ def _add_instance(self, item, many, **kwargs): # pylint: disable=unused-argument
# did we hash a new plaintext password?
original = None
pkey = getattr(item, self._primary)
- for data in self.recall('original', True):
+ for data in self.recall('original'):
if 'hash_password' in data and data.get(self._primary) == pkey:
original = data['password']
break
@@ -1244,12 +1250,6 @@ def __init__(self, *args, **kwargs):
if field in fieldlist:
fieldlist[field] = fieldlist.pop(field)
- def _call_and_store(self, *args, **kwargs):
- """ track current parent and field for pruning """
- self.store('field', kwargs['field_name'], True)
- self.store('parent', self.context.get('config'))
- return super()._call_and_store(*args, **kwargs)
-
@pre_load
def _clear_config(self, data, many, **kwargs): # pylint: disable=unused-argument
""" create config object in context if missing
| config-import [KeyError] 'id'
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
## Before you open your issue
- [ ] Check if no issue or pull-request for this already exists.
- [ ] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [ ] You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- [ ] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
```
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
```
version 1.9
## Description
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
Reciving Error: [KeyError] 'id' , when trying to import configuration, with fetches.
## Replication Steps
Steps for replicating your issue
docker exec -i $(docker-compose ps -q admin) flask mailu config-import -uv < import-user.yml
domain:
- name: example.com
user:
- email: [email protected]
displayed_name: 'Test'
enable_imap: true
enable_pop: false
enabled: true
fetches:
- comment: ''
error: null
host: 0.0.0.0
keep: true
last_check: '2022-10-25T14:26:20.876898Z'
password: 'test'
port: 993
protocol: imap
tls: true
username: [email protected]
password: 'test'
hash_password: false
quota_bytes: 10000000
spam_enabled: true
spam_mark_as_read: false
## Expected behaviour
Explain what results you expected - be as specific as possible. Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
The expected behaviours is to be able to create a new user, while fetching other mailbox.
## Logs
Often it is very useful to include log fragments of the involved component. You can get the logs via `docker logs <container name> --tail 1000`. For example for the admin container:
`docker logs mailu_admin_1 --tail 1000`
or using docker-compose `docker-compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
````markdown
```
Your logs here!
```
````
| You can just add an id to work around this: `id: 1`
I'll see if it is possible to create a new id if it's missing.
The id is needed to know if you want to add or replace a new item.
Thanks for the reply, when I add id: 1.
I'm receving the following error:
Error: [AttributeError] 'User' object has no attribute 'id'
You need to add the id to the fetch item, not the user.
Please just create a user with a fetch using the web interface and export your configuration using "config-export" to see how it should look like.
Hi,
I have exported the configuration, and still receiving the same error:
Error: [AttributeError] 'User' object has no attribute 'id'
In the example you can see that the id is already in the fetch item:
---
domain:
-
name: example.com
user:
-
displayed_name: Test
email: [email protected]
enable_imap: true
enable_pop: false
enabled: true
fetches:
-
comment: ""
error: ~
host: "example"
id: 3
keep: true
last_check: ~
password: example
port: 993
protocol: imap
tls: false
username: [email protected]
forward_destination: []
forward_enabled: false
forward_keep: true
global_admin: false
manager_of: []
password: "example"
quota_bytes: 1000000000
reply_body: ~
reply_enabled: false
reply_enddate: "2999-12-31"
reply_startdate: "1900-01-01"
reply_subject: ~
spam_enabled: true
spam_mark_as_read: true
spam_threshold: 80
I fear the bugfix has opened another bug. I'll try to fix this asap. | 2022-12-28T16:45:46 |
|
Mailu/Mailu | 2,630 | Mailu__Mailu-2630 | [
"2392"
] | e1a85a450f28984e35b737e6b1a5367abb5fcda7 | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -52,15 +52,21 @@ def test_DNS():
test_DNS()
-start_command=" ".join([
- "gunicorn",
- f"--threads {str(os.cpu_count())}",
- "-b :80",
+cmdline = [
+ "gunicorn",
+ "--threads", f"{os.cpu_count()}",
+ # If SUBNET6 is defined, gunicorn must listen on IPv6 as well as IPv4
+ "-b", f"{'[::]' if os.environ.get('SUBNET6') else ''}:80",
"--logger-class mailu.Logger",
"--worker-tmp-dir /dev/shm",
- "--access-logfile -" if (log.root.level<=log.INFO) else "",
- "--error-logfile -",
- "--preload",
- "'mailu:create_app()'"])
+ "--error-logfile", "-",
+ "--preload"
+]
-os.system(start_command)
+# logging
+if log.root.level <= log.INFO:
+ cmdline.extend(["--access-logfile", "-"])
+
+cmdline.append("'mailu:create_app()'")
+
+os.system(" ".join(cmdline))
| Docker container crashes if IPv6 is disabled at the system level.
If listen [::] is found somewhere in the configs, but IPv6 is disabled at the host system level and in the docker, then the process crashes, and, accordingly, the docker container also crashes.
This can be manually climbed into each container, corrected, but it is not very convenient.
docker exec mailu_front_1 sed -i '/listen \[/d' /conf/nginx.conf
docker exec mailu_front_1 sed -i '/listen \[/d' /etc/nginx/nginx.conf
docker exec mailu_front_1 sed -i '/listen \[/d' /etc/nginx/http.d/default.conf
docker restart mailu_front_1
docker restart mailu_webdav_1 && docker exec -it mailu_webdav_1 sed -i 's/hosts =.*\[::\].*/hosts = 0.0.0.0:5232/g' /radicale.conf && docker restart mailu_webdav_1
Can you add a container launch option to remove listen [::] from configs?
| 2023-01-25T15:43:51 |
||
Mailu/Mailu | 2,632 | Mailu__Mailu-2632 | [
"2570"
] | 3a1cecbe211872eab6cba2eb14c6502aaa8979b4 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -18,6 +18,7 @@
'TEMPLATES_AUTO_RELOAD': True,
'MEMORY_SESSIONS': False,
'FETCHMAIL_ENABLED': True,
+ 'MAILU_VERSION': 'unknown',
# Database settings
'DB_FLAVOR': None,
'DB_USER': 'mailu',
@@ -157,6 +158,10 @@ def init_app(self, app):
self.config['HOSTNAME'] = hostnames[0]
self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
+ try:
+ self.config['MAILU_VERSION'] = open('/version', 'r').read()
+ except FileNotFoundError:
+ pass
# update the app config
app.config.update(self.config)
| Small feature - add mailu version in the admin UI
Add mailu version in the admin UI
I think it would nice to be able to see which version of mailu we are running, unless there is other simple way?
Thanks
| If you run a recent-enough version of Mailu you can use:
``docker-compose exec admin cat /version`` or should be able to see which image are running using ``docker ps``.
I suggest a different approach altogether.
Broken updates have caused me great pain several times in years past. I was only saved by luck, since I hadn't updated in a while, and a new major release was available that fixed it. (God forbid you get stuck at the wrong time!) That's because you used to not be able to select specific versions. That changed a while ago. Now you can override it by setting an environment variable.
I simply made a small update script:
```bash
export MAILU_VERSION=1.9.43
docker compose pull
docker compose down
docker compose up -d --force-recreate
docker compose logs -f
```
The additional `down` is probably overkill, but some earlier releases had issues when not fully down-ed, so I kept it.
It sounds like you update automatically, I'd personally recommend against that. Hence the logs line. I want to do a sanity check before considering it done.
How to find a changelog is explained on the release page. I'm guilty of not always reading it myself, though. :)
Cheers | 2023-01-25T22:02:36 |
|
Mailu/Mailu | 2,651 | Mailu__Mailu-2651 | [
"1236"
] | 3804d0bf5e55a9bc775eb6e1e73fee17fdb414cf | diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -37,7 +37,7 @@ def __init__(self,message=_('Invalid email address.')):
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| plus mail in auto-forward destination
The email address in `admin -> User settings -> Auto-forward -> Destination` cannot contains a '+' character (error: Invalid email address). But the '+' in email address is valid and admin should accept it.
| This is a pretty straightforward bug probably related to flask form validation
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
The problem is probably here:
https://github.com/Mailu/Mailu/blob/dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2/core/admin/mailu/ui/forms.py#L40
This is the validator for the forward field
https://github.com/Mailu/Mailu/blob/dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2/core/admin/mailu/ui/forms.py#L106
I think it can be fixed with this regex
```^([+_a-z0-9\-]+)(\.[+_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([+_a-z0-9\-]+)(\.[+_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$```
According to https://regex101.com/ the regex works.
| 2023-02-05T16:03:11 |
|
Mailu/Mailu | 2,654 | Mailu__Mailu-2654 | [
"1236"
] | cdbb91b07f60e431224f189f7072f9d6faa3c549 | diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -37,7 +37,7 @@ def __init__(self,message=_('Invalid email address.')):
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| plus mail in auto-forward destination
The email address in `admin -> User settings -> Auto-forward -> Destination` cannot contains a '+' character (error: Invalid email address). But the '+' in email address is valid and admin should accept it.
| This is a pretty straightforward bug probably related to flask form validation
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
The problem is probably here:
https://github.com/Mailu/Mailu/blob/dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2/core/admin/mailu/ui/forms.py#L40
This is the validator for the forward field
https://github.com/Mailu/Mailu/blob/dbbfa44461c3b8a537e9fe6b56f4690417d3c8a2/core/admin/mailu/ui/forms.py#L106
I think it can be fixed with this regex
```^([+_a-z0-9\-]+)(\.[+_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([+_a-z0-9\-]+)(\.[+_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$```
According to https://regex101.com/ the regex works.
| 2023-02-06T12:23:30 |
|
Mailu/Mailu | 2,690 | Mailu__Mailu-2690 | [
"2555"
] | 0de2430868853fa7a3a6edc8d8175cb786b37d23 | diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -4,6 +4,7 @@
import glob
import logging as log
import requests
+import shutil
import sys
import time
from socrate import system,conf
@@ -13,8 +14,14 @@
# Actual startup script
+config_files = []
for rspamd_file in glob.glob("/conf/*"):
conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
+ config_files.append(os.path.basename(rspamd_file))
+
+for override_file in glob.glob("/overrides/*"):
+ if os.path.basename(override_file) not in config_files:
+ shutil.copyfile(override_file, os.path.join("/etc/rspamd/local.d", os.path.basename(override_file)))
# Admin may not be up just yet
healthcheck = f'http://{os.environ["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
| rethink rspamd's overrides
Currently any override put in rspamd's folder will replace Mailu's default config.
This may disable functionality (anti-spoof, oletools, ...) and doesn't make upgrades easy.
We can probably do better.
| 2023-03-09T08:29:06 |
||
Mailu/Mailu | 2,697 | Mailu__Mailu-2697 | [
"2692"
] | f0b3689732eb5a974c917b9ab2309845f558ba8a | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -86,6 +86,7 @@
'PROXY_AUTH_WHITELIST': '',
'PROXY_AUTH_HEADER': 'X-Auth-Email',
'PROXY_AUTH_CREATE': False,
+ 'PROXY_AUTH_LOGOUT_URL': None,
'SUBNET': '192.168.203.0/24',
'SUBNET6': None,
}
diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -8,26 +8,38 @@
import flask_login
import secrets
import ipaddress
+from urllib.parse import urlparse, urljoin
+from werkzeug.urls import url_unquote
@sso.route('/login', methods=['GET', 'POST'])
def login():
+ if flask.request.headers.get(app.config['PROXY_AUTH_HEADER']) and not 'noproxyauth' in flask.request.url:
+ return _proxy()
+
client_ip = flask.request.headers.get('X-Real-IP', flask.request.remote_addr)
form = forms.LoginForm()
- form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'
- form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'
fields = []
- if str(app.config["WEBMAIL"]).upper() != "NONE":
- fields.append(form.submitWebmail)
- if str(app.config["ADMIN"]).upper() != "FALSE":
+
+ if 'url' in flask.request.args and not 'homepage' in flask.request.url:
fields.append(form.submitAdmin)
+ else:
+ form.submitAdmin.label.text = form.submitAdmin.label.text + ' Admin'
+ form.submitWebmail.label.text = form.submitWebmail.label.text + ' Webmail'
+ if str(app.config["WEBMAIL"]).upper() != "NONE":
+ fields.append(form.submitWebmail)
+ if str(app.config["ADMIN"]).upper() != "FALSE":
+ fields.append(form.submitAdmin)
fields = [fields]
if form.validate_on_submit():
- if form.submitAdmin.data:
- destination = app.config['WEB_ADMIN']
- elif form.submitWebmail.data:
- destination = app.config['WEB_WEBMAIL']
+ if destination := _has_usable_redirect():
+ pass
+ else:
+ if form.submitAdmin.data:
+ destination = app.config['WEB_ADMIN']
+ elif form.submitWebmail.data:
+ destination = app.config['WEB_WEBMAIL']
device_cookie, device_cookie_username = utils.limiter.parse_device_cookie(flask.request.cookies.get('rate_limit'))
username = form.email.data
if username != device_cookie_username and utils.limiter.should_rate_limit_ip(client_ip):
@@ -57,14 +69,29 @@ def login():
def logout():
flask_login.logout_user()
flask.session.destroy()
- response = flask.redirect(flask.url_for('.login'))
+ response = flask.redirect(app.config['PROXY_AUTH_LOGOUT_URL'] or flask.url_for('.login'))
for cookie in ['roundcube_sessauth', 'roundcube_sessid', 'smsession']:
response.set_cookie(cookie, 'empty', expires=0)
return response
[email protected]('/proxy', methods=['GET'])
[email protected]('/proxy/<target>', methods=['GET'])
-def proxy(target='webmail'):
+"""
+Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
+https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
+"""
+def _has_usable_redirect():
+ if 'homepage' in flask.request.url:
+ return None
+ if url := flask.request.args.get('url'):
+ url = url_unquote(url)
+ target = urlparse(urljoin(flask.request.url, url))
+ if target.netloc == urlparse(flask.request.url).netloc:
+ return target.geturl()
+ return None
+
+"""
+https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
+"""
+def _proxy():
ip = ipaddress.ip_address(flask.request.remote_addr)
if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
@@ -73,11 +100,13 @@ def proxy(target='webmail'):
if not email:
return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
+ url = _has_usable_redirect() or app.config['WEB_ADMIN']
+
user = models.User.get(email)
if user:
flask.session.regenerate()
flask_login.login_user(user)
- return flask.redirect(app.config['WEB_ADMIN'] if target=='admin' else app.config['WEB_WEBMAIL'])
+ return flask.redirect(url)
if not app.config['PROXY_AUTH_CREATE']:
return flask.abort(500, 'You don\'t exist. Go away! (%s)' % email)
@@ -100,4 +129,4 @@ def proxy(target='webmail'):
flask_login.login_user(user)
user.send_welcome()
flask.current_app.logger.info(f'Login succeeded by proxy created user: {user} from {client_ip} through {flask.request.remote_addr}.')
- return flask.redirect(app.config['WEB_ADMIN'] if target=='admin' else app.config['WEB_WEBMAIL'])
+ return flask.redirect(url)
diff --git a/core/admin/mailu/utils.py b/core/admin/mailu/utils.py
--- a/core/admin/mailu/utils.py
+++ b/core/admin/mailu/utils.py
@@ -42,7 +42,7 @@
def handle_needs_login():
""" redirect unauthorized requests to login page """
return flask.redirect(
- flask.url_for('sso.login')
+ flask.url_for('sso.login', url=flask.request.url)
)
# DNS stub configured to do DNSSEC enabled queries
| Mailu should redirect to proxy authentication endpoints for users of proxy header authentication
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
This is related to #1972.
I've been testing proxy authentication with a new deployment of mailu. It works as expected, however there is one issue. The default login redirect for `WEB_WEBMAIL` path `/webmail`, and `WEB_ADMIN` path `/admin` will use `/sso/login`, where the user is prompted for credentials after completing SSO workflow. When using a proxy for auth, mailu should redirect logins to `WEB_WEBMAIL` to `/sso/proxy` and `WEB_ADMIN` logins to `/sso/proxy/admin`. We should have a setting in the configuration (disabled by default) to enable redirect to the proxy authentication endpoints. This would keep mailu from prompting users for their credentials where we expect all users to authenticate through the proxy. If proxy authentication breaks or there are users that do not authenticate through the proxy, users can still login by navigating to `/sso/login`.
As a workaround, I have configured requests to the base URL `/` to redirect to `/sso/proxy` using my proxy, but it's not an ideal solution. There is no way to redirect requests to `/webmail` and `/admin` to `/sso/proxy` because the proxy cannot tell if the request has been authenticated against mailu. Configuring redirects on those endpoints will only cause a redirect loop.
## Replication Steps
<!--
Steps for replicating your issue
-->
Using docker-compose and mailu master tag. Tested with Google Chrome version 110 on Windows.
1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.
2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.
3. Confirm authentication through proxy works by navigating to `/sso/proxy`. A successful login will load Webmail.
4. Logout of any sessions via mailu, or clear cookies set by mailu.
5. Navigate to base URL `/`, webmail endpoint `/webmail`, and admin endpoint `/admin`.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
The base URL `/` redirects to `/webmail`. This is expected. However, proxy authenticated requests to `/webmail` and `/admin` are redirected to `/sso/login`. Note the requests are correctly authenticated through the proxy, but not yet authenticated with mailu as it hasn't set the roundcube/rainloop/admin session cookies yet. When the browser is redirected here, mailu prompts the user for credentials which we don't want for users already authenticated through the proxy.
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
In environments where a proxy is configured for header authentication to mailu, requests to `WEB_WEBMAIL` (`/webmail`) and `WEB_ADMIN` (`/admin`) should redirect to `/sso/proxy` and `/sso/proxy/admin` respectively when the session cookies are unset/invalid/expired.
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker-compose `docker-compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
I don't think logs are necessary for this, but let me know if I can help by providing any.
| my workaround is to redirect all incoming request from `/sso/login` to `/sso/proxy` this works very good on my end. Also I redirected `/sso/logout` to the logout route of my proxy (e.g. for authentik: `/outpost.goauthentik.io/sign_out`). But I admit, it would be better if Mailu automatically redirects the `/sso/login` route to `/sso/proxy` in this setting
Yeah I know redirecting `/sso/login` is an option, but you don't want to do that in production environments where you would want to keep mailu's login page as a backup if proxy authentication breaks and users need access to their webmail, or if there are a few users who do not authenticate through the proxy, such as the admin user created during mailu setup.
Thank you for bringing up `/sso/logout` though, I didn't consider this and think that similarly it should be a setting in the mailu configuration. The way I see it, users would still get sent to `/sso/logout` to invalidate the mailu session cookies, then get redirected to the URL specified in configuration (if set) to complete the logout with the proxy auth.
This is not specific to proxy-auth: we currently don't "guess" nor auto-detect where we should redirect... we could attempt to parse the referer to get back to the right place. | 2023-03-11T09:07:14 |
|
Mailu/Mailu | 2,709 | Mailu__Mailu-2709 | [
"2708"
] | 1d9791ceaa9aec75cd838ce85de4edc4d3b6efdf | diff --git a/core/admin/mailu/sso/views/base.py b/core/admin/mailu/sso/views/base.py
--- a/core/admin/mailu/sso/views/base.py
+++ b/core/admin/mailu/sso/views/base.py
@@ -78,8 +78,8 @@ def logout():
Redirect to the url passed in parameter if any; Ensure that this is not an open-redirect too...
https://cheatsheetseries.owasp.org/cheatsheets/Unvalidated_Redirects_and_Forwards_Cheat_Sheet.html
"""
-def _has_usable_redirect():
- if 'homepage' in flask.request.url:
+def _has_usable_redirect(is_proxied=False):
+ if 'homepage' in flask.request.url and not is_proxied:
return None
if url := flask.request.args.get('url'):
url = url_unquote(url)
@@ -92,15 +92,16 @@ def _has_usable_redirect():
https://mailu.io/master/configuration.html#header-authentication-using-an-external-proxy
"""
def _proxy():
- ip = ipaddress.ip_address(flask.request.remote_addr)
+ proxy_ip = flask.request.headers.get('X-Forwarded-By', flask.request.remote_addr)
+ ip = ipaddress.ip_address(proxy_ip)
if not any(ip in cidr for cidr in app.config['PROXY_AUTH_WHITELIST']):
- return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % flask.request.remote_addr)
+ return flask.abort(500, '%s is not on PROXY_AUTH_WHITELIST' % proxy_ip)
email = flask.request.headers.get(app.config['PROXY_AUTH_HEADER'])
if not email:
return flask.abort(500, 'No %s header' % app.config['PROXY_AUTH_HEADER'])
- url = _has_usable_redirect() or app.config['WEB_ADMIN']
+ url = _has_usable_redirect(True) or app.config['WEB_ADMIN']
user = models.User.get(email)
if user:
| PROXY_AUTH_WHITELIST is validating address in REAL_IP_HEADER instead of proxy IP address
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
When using a proxy for authentication, we have the option to set proxy IPs/CIDRs to trust for authentication with `PROXY_AUTH_WHITELIST` setting. Whenever we are using a proxy for authentication we are also supposed to set `REAL_IP_HEADER` and `REAL_IP_FROM` so mailu can determine where the request came from when the proxy intercepted it. However, when `REAL_IP_HEADER` and `REAL_IP_FROM` are set, the mailu frontend is validating the IP address in the header from the proxy, rather than the proxy IP itself, for `PROXY_AUTH_WHITELIST`.
I would guess that this has something to do with how nginx is configured for the frontend, as this only happens when BOTH `REAL_IP_FROM` and `PROXY_AUTH_WHITELIST` are configured. If only `PROXY_AUTH_WHITELIST` is set, then the user is permitted to login and access webmail (at increased security risk of IP spoofing).
## Replication Steps
<!--
Steps for replicating your issue
-->
1. Deploy mailu with a reverse proxy to perform authentication. I am using traefik2 for proxy and authentik for IdP.
2. Configure a user in IdP that will authenticate to mailu. Optionally enable PROXY_AUTH_CREATE in mailu config.
3. Set mailu configuration `PROXY_AUTH_WHITELIST` to use the proxy's IP address. Leave `REAL_IP_HEADER` and `REAL_IP_FROM` unset. Restart/redeploy mailu to pick up the new configuration.
4. Confirm authentication through proxy works by navigating to mailu through the proxy, using a different host / source IP address than the one used by the proxy. A successful login will load Webmail.
5. Set mailu configuration `REAL_IP_FROM` to use the proxy's IP address. Set `REAL_IP_HEADER` to a header that the proxy will set, such as `X-Real-IP`. Restart/redeploy mailu to pick up the new configuration.
6. Navigate to mailu through the proxy, using a different host / source IP address than the one used by the proxy. Complete the login if prompted.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Mailu returns with HTTP 500 error, where `X.X.X.X` is the IP address of the host running the browser, found from the header:
```
Internal Server Error
X.X.X.X is not on PROXY_AUTH_WHITELIST
```
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
User should be permitted to login without encountering this error.
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
Logs from the frontend:
```
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail HTTP/1.1" 301 162 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /webmail/ HTTP/1.1" 302 138 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
<host IP> - - [17/Mar/2023:19:50:25 -0500] "GET /sso/login?url=/webmail/ HTTP/1.1" 500 155 "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"
```
| It looks like here is the problem
https://github.com/Mailu/Mailu/blob/86ad4c93a901233226a9492279d2d2cc677344c9/core/admin/mailu/sso/views/base.py#L95 | 2023-03-18T08:18:52 |
|
Mailu/Mailu | 2,735 | Mailu__Mailu-2735 | [
"2215"
] | b68e1323691911a3e3bc5916fa42b6e778198da2 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -31,7 +31,7 @@
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
# Statistics management
'INSTANCE_ID_PATH': '/data/instance',
- 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
+ 'STATS_ENDPOINT': '20.{}.stats.mailu.io',
# Common configuration variables
'SECRET_KEY': 'changeMe',
'DOMAIN': 'mailu.io',
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -9,7 +9,7 @@
source_suffix = '.rst'
master_doc = 'index'
project = 'Mailu'
-copyright = '2018, Mailu authors'
+copyright = '2016, Mailu authors'
author = 'Mailu authors'
version = release = os.environ.get('VERSION', 'master')
language = 'en'
@@ -25,7 +25,7 @@
# to template names.
html_sidebars = {
'**': [
- 'relations.html',
+ 'relations.html',
'searchbox.html',
]
}
@@ -36,10 +36,10 @@
'github_user': 'mailu',
'github_repo': 'mailu',
'github_version': version,
- 'stable_version': '1.9',
+ 'stable_version': '2.0',
'versions': [
- ('1.8', '/1.8/'),
('1.9', '/1.9/'),
+ ('2.0', '/2.0/'),
('master', '/master/')
],
'conf_py_path': '/docs/'
| Brainstorming for 1.10 / 2.0 roadmap
# Description
Now 1.9 has been released for some time, it is time to discuss what we want to work on for the next release.
- Release date?
- Scope 1.10 (normal release) or 2.0 (release with breaking changes)?
- What do we want to work on?
## Ideas
Ideas from the last meeting:
- introduce snappymail.
- rootless containers
- probably requires init container
- replacement of Socrates. ghostwheel42 will work on this (if he finds time).
- A security/keys page for Mailu admin interface.
- On this page you can configure/change/generate keys for all your Mail domains.
- DNS overview
- Have some UI that tells you if DNS is correctly configured.
- Have a look at BuildX
- means we can build for multiple platforms including ARM
- means we remove the build arm script
- means CI will be changed massively.
- For ARM, we could maybe build once per week to make sure the build time for normal builts is not too long.
- autoconfiguration for email clients?
- automx: https://rseichter.github.io/automx2/?
- integrate whatever we choose with mailu (part of mailu)?
- also good time to drop starttls for imap/pop/smtp(sending mail).
- So only support direct SSL/TLS
- Could be done via environment_variable. When not configured, then starttls supported. If configured (default for new deployments) then it is disabled.
- Another idea is to disable starttls, and report a custom error (when you use explicit starttls) that you must switch to implicit SSL/TLS port 465.
- Full 2 factor authentication with xoath2
- Too large in scope for this release. But preparations could be made.
- Means we need autoconfiguration. Otherwise the email client will not use xoath2.
- means using single sign on via identity provider (which mailu could be as well). This opens the door to use other identity providers in the future.
Feel free to suggest your own ideas
## Misc
For small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included.
What are your thoughts? Please share your feedback.
Regardless the above wall of text, feel free to mention any feature/issue you would like included in the next release.
## My 2 cents
For release date we could maybe aim around August/September?
We are all very busy with our personal life now. This should give enough time to implement new features. Of course a decided date is not set in stone. It could still be moved forward/backwards.
https://github.com/Mailu/Mailu/blob/master/design/mailu-directory-structure.md
This is what I was thinking about for the scope. Changing the directory structure must be part of a major release with breaking changes. Do we want to make this change for this release or postpone this?
Personally I'd like to check
- switching to SnappyMail
- rootless containers
- BuildX. At least investigate what changes are required.
Feel free to suggest your own ideas.
| thanks for this great project
just some thoughts here
- rootless containers would be great
- for my personal setup i integrated mailman3 with "official" docker container from "maxking". Was not complicated as an additional/optional setup via ansible within the same docker-compose file/network. Only thing not working here is dkim key generation as i use an additional domain for the mailing lists (e.g. "example.com" for mailu and "lists.example.com" for mailman).
- allow dkim key creation/full listing via "rspamadm vault" (will also solve dkim problem for my mailman setup above)
- maybe more customization beside log and color of upper left box - e.g. the primary/secondary color for lines/buttons and so one
- allow redirect url after admin login (simply setup of additional admin uis and use your existing nginx auth config for that - for me it would help to protect parallel mailgraph installation for mailu and allow easy auth for admins)
- more general: some statistics about postfix, e.g. how much of my mails are rejected from others like google&co as possible spam
and maybe an alternative for automx2 might be email-autoconf (https://gitlab.com/onlime/email-autoconf/-/blob/main/README.md). But have not tested it, just thought about adding this feature to my setup too and stumbled accross these two projects.
My ideas as a first time user of mailu:
- Having a `port set` for not SSL/TLS use case (local network) like for the webmail but without custom login throttling rules. (useful for external reverse proxy that handle SSL termination but SSL in enabled on `front`)
- `API` for password reset, user registration, Avatar upload/support.
- Integration of `caldav` account generation on registration via admin.
- Standardize `ENV` configration variables.
- Better documentation about `DOMAIN` and `HOSTNAMES`.
- Better error handling on startup. During configuration of `init.py`, the python script fail and print error of the key missing, effected config file/ line number (Now it fail and you dont know why. You have to kill the process to see the output stacktrace in logs.)
- Default `ENV` values on configuration scripts. some scripts dont have default values. (`RECIPIENT_DELIMITER`, `POSTMASTER`, `MESSAGE_SIZE_LIMIT`,`MESSAGE_RATELIMIT` are some)
- Prevent override of config files if path is in a mounted volume it might have been customized. (Like for the rainloop webmail override didnt work for me)
- `front`, `admin`, `antispam`, `unbound` containers health check output is huge. a simple 200 / OK would be awesome. (it breaks portainer UI)
- Automatic detection of stack configuration via PINGs for `WEBMAIL`, `CLAMAV`, `WEBDAV` or get it from an `API@admin`.
- Possibility to disable reverse proxy rules without disabling the relative service. (Like having `WEBMAIL=rainloop` but nginx configuration block is not set)
More advance stuff that mailcow have that to me they seem interesting / useful:
- Full text search
- [oletools](https://github.com/decalage2/oletools) integration in `rspamd`
hi
just came around and while testing mailu i came along the same question...
> * more general: some statistics about postfix, e.g. how much of my mails are rejected from others like google&co as possible spam
maybe "integrate" https://gitlab.com/lightmeter/controlcenter to get visual insight into the postfix logs...
Hello, i'm curious to know if it is currently possible to run Mailu using docker in rootless configuration.
Thank you if you could clarify about this topic.
From security point of view rootless containers are a must have, also I'm a bit concerned that all the environment variables are global in default `mailu.env` generated, so all applications have access to all secrets even when they don't need to.
As discussed in https://github.com/Mailu/Mailu/issues/2258 having UTF-8 Dovecot config by default (rather than default strange UTF-7 IMAP) would be nice to.
@nazar-pc most containers now drop privileges early on master and the environment is sanitized so not all secrets are available to everyone. See https://github.com/Mailu/Mailu/blob/5b4f2fb0753709c94563c01e4870c6059b4c7231/core/base/libs/socrate/socrate/system.py#L27
Going rootless is non-trivial but patches would be welcome.
I'd also like to suggest another major breaking change that is `:LAYOUT=fs` for Dovecot. It makes it nicer to manage folders and allows to use somewhat popular `.` character in names of directories. But those who upgrade would need to go through a migration process (or this might just be added to the docs somewhere as it works great with an override).
Please create new tickets/feature requests for all of this so that it can be discussed.
IMHO there is value in sticking with standard Maildir++ format: that's a lot easier to re-use for users migrating away from Mailu ... or those using 3rd party tools on the backend.
One feature that would be awesome to see on the roadmap as well is the configuration of telemetry data and metrics for mailu and its components (I believe https://github.com/Mailu/Mailu/issues/1059 was supposed to cover this).
Dovecot: native, https://doc.dovecot.org/configuration_manual/stats/openmetrics/
Rspamd: native, https://www.rspamd.com/doc/modules/metric_exporter.html
Postfix: e.g. via https://github.com/google/mtail/blob/main/examples/postfix.mtail
Nginx: e.g. via https://github.com/nginxinc/nginx-prometheus-exporter
Flask: e.g. via https://pypi.org/project/prometheus-flask-exporter/
Proposal: mass import tool for users and alias #2711
https://github.com/Mailu/Mailu/issues/2711
Proposal: Test for correct diagnostics configuration #2710
https://github.com/Mailu/Mailu/issues/2710 | 2023-04-02T16:48:35 |
|
Mailu/Mailu | 2,791 | Mailu__Mailu-2791 | [
"2789"
] | 91f4ac609f892745f2938a19c084bad68aa5ed9a | diff --git a/core/base/libs/socrate/socrate/system.py b/core/base/libs/socrate/socrate/system.py
--- a/core/base/libs/socrate/socrate/system.py
+++ b/core/base/libs/socrate/socrate/system.py
@@ -68,6 +68,9 @@ def _is_compatible_with_hardened_malloc():
# See #2764, we need vmovdqu
if line.startswith('flags') and ' avx ' not in line:
return False
+ # See #2541
+ if line.startswith('Features') and ' lrcpc ' not in line:
+ return False
return True
def set_env(required_secrets=[], log_filters=[], log_file=None):
@@ -76,7 +79,8 @@ def set_env(required_secrets=[], log_filters=[], log_file=None):
sys.stderr = LogFilter(sys.stderr, log_filters, log_file)
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", 'WARNING'))
- if not _is_compatible_with_hardened_malloc():
+ if 'LD_PRELOAD' in os.environ and not _is_compatible_with_hardened_malloc():
+ log.warning('Disabling hardened-malloc on this CPU')
del os.environ['LD_PRELOAD']
""" This will set all the environment variables and retains only the secrets we need """
| mailu front fails with KeyError: 'LD_PRELOAD'
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
Pulled the image today to create a new server. The nginx fails with the following error.
## Replication Steps
<!--
Steps for replicating your issue
-->
* docker-compose up -d
* docker shows unhealthy front container
* docker logs mailu_front_1
## Observed behaviour
<!--
Explain or paste the result you received.
-->
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
-->
```
# docker logs mailu_front_1
Traceback (most recent call last):
File "/config.py", line 8, in <module>
args = system.set_env()
File "/app/venv/lib/python3.10/site-packages/socrate/system.py", line 80, in set_env
del os.environ['LD_PRELOAD']
File "/usr/lib/python3.10/os.py", line 696, in __delitem__
raise KeyError(key) from None
KeyError: 'LD_PRELOAD'
```
| Seems like the env `"LD_PRELOAD=/usr/lib/libhardened_malloc.so" `is set when I run docker inspect
Can't reproduce it here with 2.0.8.
Which specific version of Mailu are you running? ``cat /version`` from a container
What CPU are you running it from? ``cat /proc/cpuinfo``
Did you set LD_PRELOAD manually in mailu.env?
Same here with both `ghcr.io/mailu/nginx:2.0.7` and `ghcr.io/mailu/nginx:2.0.8`.
I reverted to `2.0.6` where the error does not occur.
@RandomByte same questions.
The code is here: https://github.com/Mailu/Mailu/blob/master/core/base/libs/socrate/socrate/system.py#L80 and yes it's new. LD_PRELOAD is set https://github.com/Mailu/Mailu/blob/master/core/base/Dockerfile#L83 and should always be present: there should be no problem removing it.
OK, found the problem; I'm working on a fix | 2023-04-20T15:47:48 |
|
Mailu/Mailu | 2,794 | Mailu__Mailu-2794 | [
"2139"
] | 1f9cd7db995946e18aaf650744868e4da0e1173d | diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -9,7 +9,7 @@
from podop import run_server
from socrate import system, conf
-system.set_env(log_filters=r'waitpid\(\) returned unknown PID \d+$')
+system.set_env(log_filters=r'Error\: SSL context initialization failed, disabling SSL\: Can\'t load SSL certificate \(ssl_cert setting\)\: The certificate is empty$')
def start_podop():
system.drop_privs_to('mail')
diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -11,7 +11,6 @@
from socrate import system, conf
system.set_env(log_filters=[
- r'the Postfix mail system is running\: \d+$',
r'(dis)?connect from localhost\[(\:\:1|127\.0\.0\.1)\]( quit=1 commands=1)?$',
r'haproxy read\: short protocol header\: QUIT$',
r'discarding EHLO keywords\: PIPELINING$',
| dovecot: support for zstd/lz4 compression is not compiled in
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
When `COMPRESSION` in `mailu.env` is set to `zstd` or `lz4` and a new mail arrives, the imap container logs something like this:
```
Jan 04 23:32:46 lmtp([email protected])<32616><kRbzEw7L1GFofwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd
Jan 04 23:32:46 indexer-worker([email protected])<32619><D5ZaIcXUkKDAqMsD:Z6zPGQ7L1GFrfwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd
```
```
Jan 04 23:43:03 imap([email protected])<178><2CwwXMnUwMXAqMsE>: Error: zlib_save: Support not compiled in for handler: lz4
Jan 04 23:43:03 indexer-worker([email protected])<180><B4qhJXfN1GGvAAAAflj66Q:/8frM3fN1GG0AAAAflj66Q>: Error: zlib_save: Support not compiled in for handler: lz4
```
As far as I can tell the Dovecot package from Alpine is probably not compiled with zstd/lz4 support, but I'm not sure since the build log of the last Alpine package returns 404 :roll_eyes:
This is __not__ breaking anything, mails will be received, but they won't be compressed.
## Replication Steps
1. in `mailu.env` set `COMPRESSION=zstd` or `COMPRESSION=lz4`
2. `docker-compose down && docker-compose up`
3. send yourself a mail
4. watch the logs of the imap container
## Expected behaviour
With `COMPRESSION=zstd`/`COMPRESSION=lz4` set, mails should be compressed with zstd/lz4 and then stored.
## Workaround
Set `COMPRESSION=gz` if you need compressed mail storage.
| Does this mean it never worked?
I also see this functionality is not documented in the configuration reference.
Do you know which packages provide zstd/lz4 support?
If it is not listed in the build file (makedepends?) then we might have to file an issue in the alpine issue tracker:
https://git.alpinelinux.org/aports/tree/main/dovecot/APKBUILD#n22
For now we could document the option COMPRESSION in the configuration reference and in the mailu.env file remove zstd/lz4 in the comment.
> Does this mean it never worked?
Yes it does. :grimacing:
> Do you know which packages provide zstd/lz4 support?
> If it is not listed in the build file (makedepends?) then we might have to file an issue in the alpine issue tracker:
> https://git.alpinelinux.org/aports/tree/main/dovecot/APKBUILD#n22
No package simply provides it.
For dovecot to support zstd/lz4 compression:
1. `zstd-dev`/`lz4-dev` have to be installed _before_ dovecot is compiled (so they would have to be in the `makedepends` list)
2. the configure flags `--with-zstd` and `--with-lz4` should be supplied (it may auto-detect that the needed headers are present)
Both these steps are not done in Alpine's APKBUILD - since these compression features are optional and Alpine enjoys minimalism, I'm not sure if they would count that as a bug/a missing feature/in need of a fix β¦
> For now we could document the option COMPRESSION in the configuration reference and in the mailu.env file remove zstd/lz4 in the comment.
Yep, the only working COMPRESSION options are `gz`/`bz2` since only `zlib-dev` and `bzip2-dev` are in `makedepends`.
This was by the way also true before https://github.com/Mailu/Mailu/commit/2316ef1162d400fc486ad72df62fbd1fbd46afce changed our COMPRESSION options, only `gz`/`bz2` ever worked.
@lub Can you provide a comment on this issue since you are the author of that commit?
Do you remember why you submitted [that commit](https://github.com/Mailu/Mailu/commit/2316ef1162d400fc486ad72df62fbd1fbd46afce)?
This is the linked [PR1694](https://github.com/Mailu/Mailu/pull/1694).
@nextgens Do you remember if you tested with zstd/lz4 at that time? I know it is a long time ago, but maybe you still remember?
I don't remember :$
Do we want to fix it or drop the feature?
I'd say drop the feature. I'd like to keep using the alpine dovecot package. There is no need to make our builds even more complicated and introduce more maintenance overhead.
I'm so sad. I really liked `zstd` compression :(
In any case, you should update the configuration generator, because it confused me a lot...
@Diman0 Honestly can't remember if I tested it, but I basically made the PR to bring mailu in line with the algorithms mentioned in the dovecot documentation. I wasn't aware that they are not included in the alpine packages.
Sorry for the trouble.
On March 13th Alpine Linux got support for `zstd`compression in `dovecot` with 2.3.20-r7 which is in 3.17.
https://gitlab.alpinelinux.org/alpine/aports/-/commit/db057b7badb30307a5c66dead3049008de2c2c07
https://gitlab.alpinelinux.org/alpine/aports/-/issues/14712
https://build.alpinelinux.org/buildlogs/build-3-17-x86_64/main/dovecot/dovecot-2.3.20-r7.log
This package version is in Mailu 2.0 and thus `COMPRESSION=zstd` can now be used. | 2023-04-21T13:11:31 |
|
Mailu/Mailu | 2,796 | Mailu__Mailu-2796 | [
"2139"
] | 01846cbc1c68803ba1d7e4f187c74c9185df2003 | diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -9,7 +9,7 @@
from podop import run_server
from socrate import system, conf
-system.set_env(log_filters=r'waitpid\(\) returned unknown PID \d+$')
+system.set_env(log_filters=r'Error\: SSL context initialization failed, disabling SSL\: Can\'t load SSL certificate \(ssl_cert setting\)\: The certificate is empty$')
def start_podop():
system.drop_privs_to('mail')
diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -11,7 +11,6 @@
from socrate import system, conf
system.set_env(log_filters=[
- r'the Postfix mail system is running\: \d+$',
r'(dis)?connect from localhost\[(\:\:1|127\.0\.0\.1)\]( quit=1 commands=1)?$',
r'haproxy read\: short protocol header\: QUIT$',
r'discarding EHLO keywords\: PIPELINING$',
| dovecot: support for zstd/lz4 compression is not compiled in
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
When `COMPRESSION` in `mailu.env` is set to `zstd` or `lz4` and a new mail arrives, the imap container logs something like this:
```
Jan 04 23:32:46 lmtp([email protected])<32616><kRbzEw7L1GFofwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd
Jan 04 23:32:46 indexer-worker([email protected])<32619><D5ZaIcXUkKDAqMsD:Z6zPGQ7L1GFrfwAADGH2HQ>: Error: zlib_save: Support not compiled in for handler: zstd
```
```
Jan 04 23:43:03 imap([email protected])<178><2CwwXMnUwMXAqMsE>: Error: zlib_save: Support not compiled in for handler: lz4
Jan 04 23:43:03 indexer-worker([email protected])<180><B4qhJXfN1GGvAAAAflj66Q:/8frM3fN1GG0AAAAflj66Q>: Error: zlib_save: Support not compiled in for handler: lz4
```
As far as I can tell the Dovecot package from Alpine is probably not compiled with zstd/lz4 support, but I'm not sure since the build log of the last Alpine package returns 404 :roll_eyes:
This is __not__ breaking anything, mails will be received, but they won't be compressed.
## Replication Steps
1. in `mailu.env` set `COMPRESSION=zstd` or `COMPRESSION=lz4`
2. `docker-compose down && docker-compose up`
3. send yourself a mail
4. watch the logs of the imap container
## Expected behaviour
With `COMPRESSION=zstd`/`COMPRESSION=lz4` set, mails should be compressed with zstd/lz4 and then stored.
## Workaround
Set `COMPRESSION=gz` if you need compressed mail storage.
| Does this mean it never worked?
I also see this functionality is not documented in the configuration reference.
Do you know which packages provide zstd/lz4 support?
If it is not listed in the build file (makedepends?) then we might have to file an issue in the alpine issue tracker:
https://git.alpinelinux.org/aports/tree/main/dovecot/APKBUILD#n22
For now we could document the option COMPRESSION in the configuration reference and in the mailu.env file remove zstd/lz4 in the comment.
> Does this mean it never worked?
Yes it does. :grimacing:
> Do you know which packages provide zstd/lz4 support?
> If it is not listed in the build file (makedepends?) then we might have to file an issue in the alpine issue tracker:
> https://git.alpinelinux.org/aports/tree/main/dovecot/APKBUILD#n22
No package simply provides it.
For dovecot to support zstd/lz4 compression:
1. `zstd-dev`/`lz4-dev` have to be installed _before_ dovecot is compiled (so they would have to be in the `makedepends` list)
2. the configure flags `--with-zstd` and `--with-lz4` should be supplied (it may auto-detect that the needed headers are present)
Both these steps are not done in Alpine's APKBUILD - since these compression features are optional and Alpine enjoys minimalism, I'm not sure if they would count that as a bug/a missing feature/in need of a fix β¦
> For now we could document the option COMPRESSION in the configuration reference and in the mailu.env file remove zstd/lz4 in the comment.
Yep, the only working COMPRESSION options are `gz`/`bz2` since only `zlib-dev` and `bzip2-dev` are in `makedepends`.
This was by the way also true before https://github.com/Mailu/Mailu/commit/2316ef1162d400fc486ad72df62fbd1fbd46afce changed our COMPRESSION options, only `gz`/`bz2` ever worked.
@lub Can you provide a comment on this issue since you are the author of that commit?
Do you remember why you submitted [that commit](https://github.com/Mailu/Mailu/commit/2316ef1162d400fc486ad72df62fbd1fbd46afce)?
This is the linked [PR1694](https://github.com/Mailu/Mailu/pull/1694).
@nextgens Do you remember if you tested with zstd/lz4 at that time? I know it is a long time ago, but maybe you still remember?
I don't remember :$
Do we want to fix it or drop the feature?
I'd say drop the feature. I'd like to keep using the alpine dovecot package. There is no need to make our builds even more complicated and introduce more maintenance overhead.
I'm so sad. I really liked `zstd` compression :(
In any case, you should update the configuration generator, because it confused me a lot...
@Diman0 Honestly can't remember if I tested it, but I basically made the PR to bring mailu in line with the algorithms mentioned in the dovecot documentation. I wasn't aware that they are not included in the alpine packages.
Sorry for the trouble.
On March 13th Alpine Linux got support for `zstd`compression in `dovecot` with 2.3.20-r7 which is in 3.17.
https://gitlab.alpinelinux.org/alpine/aports/-/commit/db057b7badb30307a5c66dead3049008de2c2c07
https://gitlab.alpinelinux.org/alpine/aports/-/issues/14712
https://build.alpinelinux.org/buildlogs/build-3-17-x86_64/main/dovecot/dovecot-2.3.20-r7.log
This package version is in Mailu 2.0 and thus `COMPRESSION=zstd` can now be used. | 2023-04-22T09:47:30 |
|
Mailu/Mailu | 2,808 | Mailu__Mailu-2808 | [
"2805"
] | f2435f69644167079c868206c2f52a5c8c14c5ea | diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -9,13 +9,13 @@
import time
from socrate import system,conf
-system.set_env()
+env = system.set_env()
# Actual startup script
config_files = []
for rspamd_file in glob.glob("/conf/*"):
- conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
+ conf.jinja(rspamd_file, env, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
config_files.append(os.path.basename(rspamd_file))
for override_file in glob.glob("/overrides/*"):
@@ -23,7 +23,7 @@
shutil.copyfile(override_file, os.path.join("/etc/rspamd/local.d", os.path.basename(override_file)))
# Admin may not be up just yet
-healthcheck = f'http://{os.environ["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
+healthcheck = f'http://{env["ADMIN_ADDRESS"]}/internal/rspamd/local_domains'
while True:
time.sleep(1)
try:
| SCAN_MACROS is incorrect case in Rspamd configs
In Mailu/core/rspamd/conf/external_services.conf, the first line is '{% if SCAN_MACROS == 'True' %}'. It is also the same in external_services_group.conf, note the capital 'T' in 'True'. When the mailu.env is generated, it generates SCAN_MACROS=true, i.e . all lowercase. Thus, in order to enable oletools, one must put SCAN_MACROS=True in environment file. I'm not sure what other items are reliant on SCAN_MACROS, but they should be checked as well.
| 2023-05-06T07:08:29 |
||
Mailu/Mailu | 2,838 | Mailu__Mailu-2838 | [
"2837"
] | 097bad138f35c92823b65488066abc5b97e4a125 | diff --git a/core/admin/mailu/internal/nginx.py b/core/admin/mailu/internal/nginx.py
--- a/core/admin/mailu/internal/nginx.py
+++ b/core/admin/mailu/internal/nginx.py
@@ -111,7 +111,6 @@ def handle_authentication(headers):
"Auth-Server": server,
"Auth-User": user_email,
"Auth-User-Exists": is_valid_user,
- "Auth-Password": password,
"Auth-Port": port
}
status, code = get_status(protocol, "authentication")
@@ -120,7 +119,6 @@ def handle_authentication(headers):
"Auth-Error-Code": code,
"Auth-User": user_email,
"Auth-User-Exists": is_valid_user,
- "Auth-Password": password,
"Auth-Wait": 0
}
# Unexpected
diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -6,6 +6,7 @@
import flask_login
import base64
import sqlalchemy.exc
+import urllib
@internal.route("/auth/email")
def nginx_authentication():
@@ -30,6 +31,7 @@ def nginx_authentication():
if int(flask.request.headers['Auth-Login-Attempt']) < 10:
response.headers['Auth-Wait'] = '3'
return response
+ raw_password = urllib.parse.unquote(headers["Auth-Pass"])
headers = nginx.handle_authentication(flask.request.headers)
response = flask.Response()
for key, value in headers.items():
@@ -52,7 +54,14 @@ def nginx_authentication():
if not is_port_25:
utils.limiter.exempt_ip_from_ratelimits(client_ip)
elif is_valid_user:
- utils.limiter.rate_limit_user(username, client_ip, password=response.headers.get('Auth-Password', None))
+ password = None
+ try:
+ password = raw_password.encode("iso8859-1").decode("utf8")
+ except:
+ app.logger.warn(f'Received undecodable password for {username} from nginx: {raw_password!r}')
+ utils.limiter.rate_limit_user(username, client_ip, password=None)
+ else:
+ utils.limiter.rate_limit_user(username, client_ip, password=password)
elif not is_from_webmail:
utils.limiter.rate_limit_ip(client_ip, username)
return response
| diff --git a/tests/compose/core/00_create_users.sh b/tests/compose/core/00_create_users.sh
--- a/tests/compose/core/00_create_users.sh
+++ b/tests/compose/core/00_create_users.sh
@@ -8,4 +8,5 @@ docker compose -f tests/compose/core/docker-compose.yml exec -T admin flask mail
docker compose -f tests/compose/core/docker-compose.yml exec -T admin flask mailu admin admin mailu.io 'password' --mode=update || exit 1
docker compose -f tests/compose/core/docker-compose.yml exec -T admin flask mailu user user mailu.io 'password' || exit 1
docker compose -f tests/compose/core/docker-compose.yml exec -T admin flask mailu user 'user/with/slash' mailu.io 'password' || exit 1
+docker compose -f tests/compose/core/docker-compose.yml exec -T admin flask mailu user 'user_UTF8' mailu.io 'passwordβ¬' || exit 1
echo "User testing successful!"
diff --git a/tests/compose/core/05_connectivity.py b/tests/compose/core/05_connectivity.py
--- a/tests/compose/core/05_connectivity.py
+++ b/tests/compose/core/05_connectivity.py
@@ -7,25 +7,31 @@
import managesieve
SERVER='localhost'
-USERNAME='[email protected]'
-PASSWORD='password'
+USERNAME='[email protected]'
+PASSWORD='passwordβ¬'
+#https://github.com/python/cpython/issues/73936
+#SMTPlib does not support UTF8 passwords.
+USERNAME_ASCII='[email protected]'
+PASSWORD_ASCII='password'
+
def test_imap(server, username, password):
+ auth = lambda data : f'\x00{username}\x00{password}'
print(f'Authenticating to imaps://{username}:{password}@{server}:993/')
with imaplib.IMAP4_SSL(server) as conn:
- conn.login(username, password)
+ conn.authenticate('PLAIN', auth)
conn.noop()
print('OK')
print(f'Authenticating to imaps://{username}:{password}@{server}:143/')
with imaplib.IMAP4(server) as conn:
conn.starttls()
- conn.login(username, password)
+ conn.authenticate('PLAIN', auth)
conn.noop()
print('OK')
print(f'Authenticating to imap://{username}:{password}@{server}:143/')
try:
with imaplib.IMAP4(server) as conn:
- conn.login(username, password)
+ conn.authenticate('PLAIN', auth)
print(f'Authenticating to imap://{username}:{password}@{server}:143/ worked without STARTTLS!')
sys.exit(102)
except imaplib.IMAP4.error:
@@ -121,7 +127,7 @@ def test_managesieve(server, username, password):
if m.login('', username, password) != 'OK':
print(f'Authenticating to sieve://{username}:{password}@{server}:4190/ has failed!')
sys.exit(109)
-
+
if m.listscripts()[0] != 'OK':
print(f'Listing scripts failed!')
sys.exit(110)
@@ -130,5 +136,7 @@ def test_managesieve(server, username, password):
if __name__ == '__main__':
test_imap(SERVER, USERNAME, PASSWORD)
test_pop3(SERVER, USERNAME, PASSWORD)
- test_SMTP(SERVER, USERNAME, PASSWORD)
+ test_SMTP(SERVER, USERNAME_ASCII, PASSWORD_ASCII)
test_managesieve(SERVER, USERNAME, PASSWORD)
+#https://github.com/python/cpython/issues/73936
+#SMTPlib does not support UTF8 passwords.
\ No newline at end of file
| No special characters in passwords allowed
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0`
## Description
Authentication fails when using special characters in a user's password.
I cannot use special characters in a password, e.g. 'β¬'. It worked in version 1.9
## Replication Steps
Use for example 'β¬' in a user's password.
## Observed behaviour
Clients are not able to login to IMAP server.
## Expected behaviour
Clients should be able to login to IMAP server.
## Logs
Logs from `admin` container:
```
192.168.203.1 - - [30/May/2023:08:39:07 +0000] "GET /internal/auth/email HTTP/1.0" 500 0 "-" "-"
[2023-05-30 08:39:07 +0000] [18] [ERROR] Error handling request /internal/auth/email
Traceback (most recent call last):
File "/app/venv/lib/python3.10/site-packages/gunicorn/workers/gthread.py", line 271, in handle
keepalive = self.handle_request(req, conn)
File "/app/venv/lib/python3.10/site-packages/gunicorn/workers/gthread.py", line 331, in handle_request
resp.close()
File "/app/venv/lib/python3.10/site-packages/gunicorn/http/wsgi.py", line 391, in close
self.send_headers()
File "/app/venv/lib/python3.10/site-packages/gunicorn/http/wsgi.py", line 322, in send_headers
util.write(self.sock, util.to_bytestring(header_str, "latin-1"))
File "/app/venv/lib/python3.10/site-packages/gunicorn/util.py", line 565, in to_bytestring
return value.encode(encoding)
UnicodeEncodeError: 'latin-1' codec can't encode character '\u20ac' in position 267: ordinal not in range(256)
```
| I can only replicate this for imap/smtp login (so login for email clients).
Login via the SSO form (Web) works fine.
I found the problematic part:
https://github.com/Mailu/Mailu/blob/097bad138f35c92823b65488066abc5b97e4a125/core/admin/mailu/internal/nginx.py#L114
If i comment this line, then the error does not occur. I guess this must be encoded? | 2023-05-30T14:13:08 |
Mailu/Mailu | 2,903 | Mailu__Mailu-2903 | [
"2683"
] | 32d1c7d899e81d7de24f2e15cb39bf0eb298a2fe | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -28,8 +28,6 @@ def nginx_authentication():
response = flask.Response()
response.headers['Auth-Status'] = status
response.headers['Auth-Error-Code'] = code
- if int(flask.request.headers['Auth-Login-Attempt']) < 10:
- response.headers['Auth-Wait'] = '3'
return response
raw_password = urllib.parse.unquote(headers['Auth-Pass']) if 'Auth-Pass' in headers else ''
headers = nginx.handle_authentication(flask.request.headers)
@@ -45,8 +43,6 @@ def nginx_authentication():
response = flask.Response()
response.headers['Auth-Status'] = status
response.headers['Auth-Error-Code'] = code
- if int(flask.request.headers['Auth-Login-Attempt']) < 10:
- response.headers['Auth-Wait'] = '3'
return response
is_valid_user = True
if headers.get("Auth-Status") == "OK":
diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -1,9 +1,15 @@
#!/usr/bin/env python3
+import logging as log
import os
-import time
+import requests
+import sys
import subprocess
+import time
+from threading import Thread
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+log.basicConfig(stream=sys.stderr, level="WARNING")
hostnames = ','.join(set(host.strip() for host in os.environ['HOSTNAMES'].split(',')))
command = [
@@ -39,8 +45,34 @@
# Wait for nginx to start
time.sleep(5)
+class MyRequestHandler(SimpleHTTPRequestHandler):
+ def do_GET(self):
+ if self.path == '/testing':
+ self.send_response(204)
+ else:
+ self.send_response(404)
+ self.send_header('Content-Type', 'text/plain')
+ self.end_headers()
+
+def serve_one_request():
+ with HTTPServer(("0.0.0.0", 8008), MyRequestHandler) as server:
+ server.handle_request()
+
# Run certbot every day
while True:
+ while True:
+ hostname = os.environ['HOSTNAMES'].split(' ')[0]
+ target = f'http://{hostname}/.well-known/acme-challenge/testing'
+ thread = Thread(target=serve_one_request)
+ thread.start()
+ r = requests.get(target)
+ if r.status_code != 204:
+ log.error(f"Can't reach {target}!, please ensure it's fixed or change the TLS_FLAVOR.")
+ time.sleep(5)
+ else:
+ break
+ thread.join()
+
subprocess.call(command)
subprocess.call(command2)
time.sleep(86400)
| Certdump: Make acme.json as a variable - enhancement
## Environment & Version
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `1.9`
## Description
I am using Traefik as reverse proxy and Traefik can handle multiple domain names for one host. For this reason i setup Traefik to store for each domain the certificates in different files, e. g. `acme-<domain>.json`
Certdumps script has a hardcoded point to acme.json in a specific folder. I mounted the volume of Traefik which contains the acme files.
Without editing the script in the running Certdump container i can't get the updated certificate that is done by Traefik
## Replication Steps
Store cert in a separate file.
traefik.yml:
```
certificatesResolvers:
letsencrypt-<domain>:
acme:
email: [email protected]
storage: /data/acme-<domain>.json
tlsChallenge: {}
```
Using the cert resolver for Mailu.
Mailu docker-compose:
```
traefik.http.routers.mailu.tls.certresolver=letsencrypt-<domain>
```
Mount the acme.json folder to certdump
Mailu docker-compose:
```
certdumper:
image: ${DOCKER_REGISTRY}/${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}traefik-certdumper:${MAILU_VERSION:-1.9}
container_name: mailu_certdump-1.9
restart: always
environment:
- DOMAIN=mail.<fqdn>
- TRAEFIK_VERSION=${TRAEFIK_VERSION}
volumes:
- /etc/localtime:/etc/localtime:ro
# Folder, which contains the acme.json
- "traefik_data:/traefik:ro"
# Folder, where cert.pem and key.pem will be written
- "certdumper-data-1.9:/output"
```
## Observed behaviour
In the Traefik folder is still an old acme.json file with expired certificates. Certdump always takes acme.json until I change it manually in `run.sh` script. Of course this works only until the container will be deployed again.
## Expected behaviour
Certdump should not take hardcoded acme.json
## Logs
Not necessary.
| 2023-08-12T06:59:11 |
||
Mailu/Mailu | 2,923 | Mailu__Mailu-2923 | [
"2917"
] | fb97cec2381d86555c25943495f8383c82564be3 | diff --git a/core/base/libs/socrate/socrate/system.py b/core/base/libs/socrate/socrate/system.py
--- a/core/base/libs/socrate/socrate/system.py
+++ b/core/base/libs/socrate/socrate/system.py
@@ -97,6 +97,8 @@ def set_env(required_secrets=[], log_filters=[], log_file=None):
for secret in required_secrets:
os.environ[f'{secret}_KEY'] = hmac.new(bytearray(secret_key, 'utf-8'), bytearray(secret, 'utf-8'), 'sha256').hexdigest()
+ os.system('find /run -xdev -type f -name \*.pid -print -delete')
+
return {
key: _coerce_value(os.environ.get(key, value))
for key, value in os.environ.items()
| IMAP container error on reboot
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
Occasionally, when performing a server reboot for routine maintenance, Mailu does not start correctly. I have narrowed it down to the imap container. The only error I see in the imap container logs is: Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid). This is on my production machine, so I cannot keep the service down too long to troubleshoot. Before, I would restart the entire stack, but since I looked into it more on this last occurrence, I simply restarted the IMAP container and everything works fine. I am apprehensive about even posting this bug, since I do not have more information to offer. I can spin up a test environment and keep rebooting until the error occurs if necessary. I was running 1.9 for about a year, and it seems like this only started once I moved to 2.0 back in April, but I'm not sure.
Thanks for all of the hard work you all do.
## Replication Steps
<!--
Steps for replicating your issue
-->
It is very difficult to reproduce. It happens maybe 10% of the time.
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
-->
```
Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
```
| I set up a test environment and was able to reproduce the issue. Below is the full scrubbed IMAP container log. This time around, restarting the IMAP container alone did not remedy the problem. However, restarting the entire compose stack did fix it.
```
2023-08-21T01:14:38.123746000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-21T01:14:38.251769000Z Aug 20 20:14:38 master: Info: Dovecot v2.3.20 (80a5ac675d) starting up for imap, pop3, lmtp, sieve
2023-08-21T01:16:42.609326000Z Aug 20 20:16:42 lmtp(41): Error: SSL context initialization failed, disabling SSL: Can't load SSL certificate (ssl_cert setting): The certificate is empty
2023-08-21T01:16:42.616802000Z Aug 20 20:16:42 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:16:42.710910000Z Aug 20 20:16:42 lmtp([email protected])<41><CYm8JPq64mQpAAAAGOvSGA>: Info: sieve: msgid=unspecified: stored mail into mailbox 'INBOX'
2023-08-21T01:16:42.736340000Z Aug 20 20:16:42 indexer-worker([email protected])<47><CYm8JPq64mQpAAAAGOvSGA:gqjHK/q64mQvAAAAGOvSGA>: Info: FTS Xapian: 'INBOX' (/mail/[email protected]/xapian-indexes/db_48120528fabae2642900000018ebd218) indexes do not exist. Initializing DB
2023-08-21T01:16:42.739600000Z Aug 20 20:16:42 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:16:44.427003000Z Aug 20 20:16:44 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:16:44.475830000Z Aug 20 20:16:44 lmtp([email protected])<41><eLZsGfy64mQpAAAAGOvSGA>: Info: sieve: msgid=unspecified: stored mail into mailbox 'INBOX'
2023-08-21T01:16:44.498321000Z Aug 20 20:16:44 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:15.657275000Z Aug 20 20:17:15 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:17:15.699291000Z Aug 20 20:17:15 lmtp([email protected])<41><yK4lJxu74mQpAAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:15.730934000Z Aug 20 20:17:15 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:20.463985000Z Aug 20 20:17:20 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:17:20.501934000Z Aug 20 20:17:20 lmtp([email protected])<41><+OugGyC74mQpAAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:20.529061000Z Aug 20 20:17:20 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:23.022773000Z Aug 20 20:17:23 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:17:23.035358000Z Aug 20 20:17:23 lmtp(55): Error: SSL context initialization failed, disabling SSL: Can't load SSL certificate (ssl_cert setting): The certificate is empty
2023-08-21T01:17:23.042875000Z Aug 20 20:17:23 lmtp(55): Info: Connect from 172.16.7.10
2023-08-21T01:17:23.055468000Z Aug 20 20:17:23 lmtp(56): Error: SSL context initialization failed, disabling SSL: Can't load SSL certificate (ssl_cert setting): The certificate is empty
2023-08-21T01:17:23.063121000Z Aug 20 20:17:23 lmtp(56): Info: Connect from 172.16.7.10
2023-08-21T01:17:23.070807000Z Aug 20 20:17:23 lmtp([email protected])<41><CMw5ASO74mQpAAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:23.099715000Z Aug 20 20:17:23 lmtp([email protected])<55><mBSJAiO74mQ3AAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:23.109304000Z Aug 20 20:17:23 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:23.133226000Z Aug 20 20:17:23 lmtp(55): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:23.136763000Z Aug 20 20:17:23 lmtp([email protected])<56><auu4AyO74mQ4AAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:23.158341000Z Aug 20 20:17:23 lmtp(56): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:23.344474000Z Aug 20 20:17:23 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:17:23.381523000Z Aug 20 20:17:23 lmtp([email protected])<41><QCuAFCO74mQpAAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:23.412098000Z Aug 20 20:17:23 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:24.136545000Z Aug 20 20:17:24 lmtp(41): Info: Connect from 172.16.7.10
2023-08-21T01:17:24.174305000Z Aug 20 20:17:24 lmtp([email protected])<41><8P4cCCS74mQpAAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:24.205175000Z Aug 20 20:17:24 lmtp(41): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:17:24.417230000Z Aug 20 20:17:24 lmtp(56): Info: Connect from 172.16.7.10
2023-08-21T01:17:24.453522000Z Aug 20 20:17:24 lmtp([email protected])<56><ODnVGCS74mQ4AAAAGOvSGA>: Info: sieve: msgid=<[email protected]>: stored mail into mailbox 'INBOX'
2023-08-21T01:17:24.456623000Z ERROR:asyncio:Unclosed connection
2023-08-21T01:17:24.456793000Z client_connection: Connection<ConnectionKey(host='admin', port=80, is_ssl=False, ssl=None, proxy=None, proxy_auth=None, proxy_headers_hash=None)>
2023-08-21T01:17:24.521764000Z Aug 20 20:17:24 lmtp(56): Info: Disconnect from 172.16.7.10: Logged out (state=READY)
2023-08-21T01:18:23.166671000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-21T01:18:23.498318000Z Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-21T01:18:23.498512000Z Aug 20 20:18:23 master: Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-21T01:36:28.971116000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-21T01:36:29.019423000Z Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-21T01:36:29.019639000Z Aug 20 20:36:29 master: Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-21T01:37:45.890071000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-21T01:37:45.938297000Z Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-21T01:37:45.938515000Z Aug 20 20:37:45 master: Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
```
Same sort of issue here, but the bug report was closed. They claimed the Dovecot error is unimportant: https://github.com/Mailu/Mailu/issues/2886
For the sake of completeness I'm attaching my `mailu.env` and `docker-compose.yml` :
- [docker-compose.yml](https://github.com/Mailu/Mailu/files/12420009/docker-compose.yml.txt)
- [mailu.env](https://github.com/Mailu/Mailu/files/12420012/mailu.env.txt)
Thanks @geckolinux. I searched through the issues, but did not run across the one you referenced. I'm not sure why it was closed. There is definitely a bug, most likely in the imap container.
I think this could possibly be the fix: #2913. I have disabled ipv6 on my server. I added the dovecot override and did a reboot without issues. But again, this one has been elusive to report since it doesn't happen every time.
Thanks a lot @DrDoug88 for confirming that.
I'm not sure if IMAP is also part of the problem, but in my case I can confirm that POP3 was failing when my third party webapp tried to access it. Also the redirection error that prevented the web interface from working.
Thanks for finding #2913, so in that case is it a problem if IPv6 is _enabled_ or _disabled_ on the server? And at what level? At the container level or the Linux kernel level? In my case the server doesn't have a public IPv6 address, but I also haven't explicitly disabled the IPv6 stack.
> But again, this one has been elusive to report since it doesn't happen every time.
Yes, definitely this is a challenge. The randomness makes me think it's some kind of a race condition or missing service dependency. In my case the bug doesn't usually seem to occur unless the server was up for more than about 24h before the reboot.
I disabled IPv6 on the host via `net.ipv6.conf.all.disable_ipv6 = 1` in /etc/sysctl.conf. I also have `"ipv6": false` in /etc/docker/daemon.json. I too had troubles with the web interface as well as the other services (IMAP, POP, etc.) when this issue occurred. I'm not sure if it occurred because ipv6 was enabled or disabled on the server or not. It could be those settings that affected it, or something else entirely. My thought for now is: set the mailu-imap container to only listen on IPv4 and see if the issue occurs again... ;). I appreciate your responses though. Hopefully we can determine the root of the problem by process of elimination.
I am not sure why you think it is related to ipv6 and the other ticket...
Next time you are able to reproduce the issue please verify/confirm:
- The content of /run/dovecot/master.pid (``cat /run/dovecot/master.pid``)
- Whether there is an actual process (and which) with that PID.
- If it is dovecot, why is the startup script run again (the container is obviously already running!?!) ?
- What are the permissions of the file and parent folder (``ls -ld /run/dovecot /run/dovecot/master.pid``) ?
I am not sure why you think it is _**not**_ related to ipv6. There seems to be a myriad of issues related to the imap/dovecot container. Other issues on this topic have been raised and closed without proper resolution. I will provide the information that you have requested. I know you are very actively working on this project @nextgens, I see you responding to nearly every issue. And I have nothing but gratitude for your hard work. But please read the room. We are just trying to get to the bottom of the problem man.
> I am not sure why you think it is not related to ipv6.
There is nothing in the logs you have provided that suggests it is: all the IP addresses appearing in your logs are ipv4.
geckolinux keeps unhelpfully adding noise to existing tickets... that in all likelihood and by his own admission are unrelated. I suggest you do like me and ignore him until he bothers to put a description of the symptoms, the relevant config and logs on the same ticket.
@nextgens So please confirm, you would like duplicate tickets to be opened when the same symptoms are present?
@geckolinux here you are adding noise about your config while complaining about POP3 and webmail while saying "I'm not sure if IMAP is also part of the problem". How is that related to a problem with the PID file (which is the Fatal error reported here)?
On #2886 you were complaining about a log message that I told you is expected and harmless. The ticket has been closed because the reporter provided no logs nor config file.
On the face of it the symptoms exhibited are not the same.
They are **_entirely_** related. @geckolinux thankfully pointed me to that ticket, not noise, very helpful. In #2886, the OP says that the webmail is not working. The next responses seem to indicate IMAP/dovecot, and "Fatal: Dovecot is already running with PID 9". This is what I was running into. @geckolinux responded that he too was having the same issues, but only after the server was up for more than 24 hours. Then the ticket was closed without explanation.
The objective of the forum concept is for others to find related issues and engage on those topics rather than creating duplicates. We are CLEARLY experiencing the same symptoms.
On my ticket, I confirmed that it was the imap container, since on one instance when this occurred, I noticed the errors in the imap container, then I restarted only that container, and the issue was resolved, i.e. everything worked. This has happened randomly a handful of times to me over the past few months, but it only occurs on server reboot. I had originally thought it was webmail related too. I didn't have the time to investigate and report the bug properly, but now I do.
I love Mailu, it's awesome, and I want to do my part to help from the user side. I spun up a test machine solely because of this issue, to test and find it, so that I could get the right kind of information/logs together to report it properly. In fact, I will even spin up a second test machine, one with both ipv4 and ipv6, and the other with only ipv4, to see if I can get the issue to reoccur. I have yet to replicate the issue since adding the listening interface override in dovecot.conf.
I'm afraid I honestly don't understand. Is not the error from my logs `Fatal error: Dovecot is already running` that I mentioned [here](https://github.com/Mailu/Mailu/issues/2886#issuecomment-1672724295) the same error that @DrDoug88 is reporting? He is using IMAP, and I am using POP3. Is not Dovecot responsable for both IMAP and POP3 in Mailu? And is a "Fatal" error insignificant?
At any rate, I will report back with the requested information from https://github.com/Mailu/Mailu/issues/2917#issuecomment-1691222299 the next time this happens.
Okay, I created a fresh VM, performed the installation, did a reboot. This one is with ipv4 and ipv6, i.e. no dovecot override. Upon reboot, the issue has occurred. Here are the details that you have requested:
1. Contents of master.pid:
```
c8622416fc70:/run/dovecot# cat master.pid
9
```
2. Processes:
```
c8622416fc70:/run/dovecot# ps -A
PID USER TIME COMMAND
1 root 0:01 python3 /start.py
9 mail 0:00 python3 /start.py
58 root 0:00 bash
76 root 0:00 ps -A
```
3. Permissions:
```
c8622416fc70:/run/dovecot# ls -ld /run/dovecot /run/dovecot/master.pid
drwxr-xr-x 5 root root 4096 Aug 25 03:14 /run/dovecot
-rw------- 1 root root 2 Aug 25 03:11 /run/dovecot/master.pid
```
Here is the accompanying log:
```
2023-08-25T08:11:35.515076000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-25T08:11:35.695948000Z Aug 25 03:11:35 master: Info: Dovecot v2.3.20 (80a5ac675d) starting up for imap, pop3, lmtp, sieve
2023-08-25T08:14:54.996990000Z Aug 25 03:14:54 lmtp(57): Error: SSL context initialization failed, disabling SSL: Can't load SSL certificate (ssl_cert setting): The certificate is empty
2023-08-25T08:14:55.004778000Z Aug 25 03:14:55 lmtp(57): Info: Connect from 172.16.7.9
2023-08-25T08:14:55.117758000Z Aug 25 03:14:55 lmtp([email protected])<57><HXFCAP9i6GQ5AAAAZXHIzA>: Info: sieve: msgid=unspecified: stored mail into mailbox 'INBOX'
2023-08-25T08:14:55.152520000Z Aug 25 03:14:55 indexer-worker([email protected])<63><HXFCAP9i6GQ5AAAAZXHIzA:0GPsCP9i6GQ/AAAAZXHIzA>: Info: FTS Xapian: 'INBOX' (/mail/[email protected]/xapian-indexes/db_d8b18603ff62e864390000006571c8cc) indexes do not exist. Initializing DB
2023-08-25T08:14:55.173329000Z Aug 25 03:14:55 lmtp(57): Info: Disconnect from 172.16.7.9: Logged out (state=READY)
2023-08-25T08:14:56.644450000Z Aug 25 03:14:56 lmtp(57): Info: Connect from 172.16.7.9
2023-08-25T08:14:56.712615000Z Aug 25 03:14:56 lmtp([email protected])<57><MK1gJgBj6GQ5AAAAZXHIzA>: Info: sieve: msgid=unspecified: stored mail into mailbox 'INBOX'
2023-08-25T08:14:56.735001000Z Aug 25 03:14:56 lmtp(57): Info: Disconnect from 172.16.7.9: Logged out (state=READY)
System Rebooted...
2023-08-25T08:16:37.759918000Z WARNING:root:Disabling hardened-malloc on this CPU
2023-08-25T08:16:37.966560000Z Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
2023-08-25T08:16:37.966822000Z Aug 25 03:16:37 master: Fatal: Dovecot is already running with PID 9 (read from /run/dovecot/master.pid)
```
I can leave this machine in this state as long as necessary and perform any other requested actions.
@DrDoug88 thank you for proving my point.
I will leave the two of you investigate v6 related issues while I work on the PR to fix the issue you have reported. | 2023-08-25T11:45:35 |
|
Mailu/Mailu | 2,929 | Mailu__Mailu-2929 | [
"2928"
] | 585549ce92af7027a5e5d642f9c14e4a42b944f7 | diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -64,7 +64,7 @@ def run(debug):
username=escape_rc_string(fetch["username"]),
password=escape_rc_string(fetch["password"]),
options=options,
- folders=folders,
+ folders='' if fetch['protocol'] == 'pop3' else folders,
lmtp='' if fetch['scan'] else 'lmtp',
)
if debug:
| fetchmail does not fetch mails in 2.0
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [X] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: 2.0, Container-"Id": "sha256:dd0a30f2ecb74491c49c8ab1b1d8d710eb55cc1e81d411acea9171ebd40ff314",
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
After updating from 1.9 to 2.0 fetchmails does not fetch mails from remote POP3s-Servers. The container-logs contain the following information with log-level WARNING. Log-level DEBUG does not print more useful information
```
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Option --folder is not supported with POP3
mailu-20-fetchmail-1 | Sleeping for 1000 seconds
mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)
mailu-20-fetchmail-1 |
mailu-20-fetchmail-1 |
mailu-20-fetchmail-1 | fetchmail: Query status=5 (SYNTAX)
```
I copied the mailu.db and fired up the new stack based on the newly created configs and docker-compose.yml-files.
## Replication Steps
<!--
Steps for replicating your issue
-->
migrated DB from 1.9 copied to the new folder and starting the stack.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
Any chance to get more detailed information from within the container?
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
Fetching the remote mails and make them available in mailus mail-storage
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
| We can either find a way to enable MBOX (and assume the remote server supports it) or skip setting folders in the configuration file when we are configured to use pop3
https://sources.debian.org/src/fetchmail/6.3.26-3/pop3.c/#L978 | 2023-08-30T11:19:00 |
|
Mailu/Mailu | 2,958 | Mailu__Mailu-2958 | [
"2919"
] | b71039572c2f49f6dc2c81a67cdc465473e752fd | diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -37,4 +37,4 @@
os.system("mkdir -m 755 -p /run/rspamd")
os.system("chown rspamd:rspamd /run/rspamd")
os.system("find /var/lib/rspamd | grep -v /filter | xargs -n1 chown rspamd:rspamd")
-os.execv("/usr/sbin/rspamd", ["rspamd", "-f", "-u", "rspamd", "-g", "rspamd"])
+os.execv("/usr/bin/rspamd", ["rspamd", "-f", "-u", "rspamd", "-g", "rspamd"])
| Vulnerability in ClamAV
## Environment & Version
### Environment
- [ ] docker compose
- [x] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
## Description
ClamAV version 0.105.2 is vulnerable to [CVE-2023-20197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-20197)
Unfortunately, ClamAV will not provide any update in the 0.105 branch, as it is EOL see https://blog.clamav.net/2023/07/2023-08-16-releases.html
## Replication Steps
```
$ docker run --pull=always --rm -it ghcr.io/mailu/clamav:master clamd --version
master: Pulling from mailu/clamav
Digest: sha256:dd088fc80ab063b0588160a69fce034d5d1f33db6d85d57296154fc51cdeaffa
Status: Image is up to date for ghcr.io/mailu/clamav:master
ClamAV 0.105.2
```
## Observed behaviour
ClamAV is in a vulnerable state
## Expected behaviour
I expect ClamAV to be updated to a fixed version (1.1.1 or 1.0.2)
| It looks like the base image should be updated to Alpine 3.18. As seen [here](https://pkgs.alpinelinux.org/packages?name=clamav&branch=v3.18&repo=&arch=&maintainer=), it allows to install ClamAV 1.1.0
Yeah well for master the fix is quite straightforward... I'm not sure we want to backport a switch of the base image though ;)
So how do we progress this? Has someone filled in a ticket with Alpine so that we get the fix backported to 3.17?
> Yeah well for master the fix is quite straightforward... I'm not sure we want to backport a switch of the base image though ;)
Yes, I agree to that. Updating Alpine in the base might also bump the versions of other installed packages.
> So how do we progress this? Has someone filled in a ticket with Alpine so that we get the fix backported to 3.17?
I'm not sure what the policy of Alpine is about backporting this. @cpernot-hf mentioned ClamAV 0.105 is EOL, so the backport will mean a completely different version.
Maybe `docker sbom` could provide some insights in all of the installed packages on the Mailu container images. This could then be compared with the versions of Alpine 3.18 to get a better overview of the changes and if it might be feasible to update the Alpine version non the less.
3.17 is "stable" and security-supported until end of 2024 according to https://alpinelinux.org/releases/
My suggestion is to log a ticket with them and see what they say. They may be able to backport just the fix or the full package.
I've filed a backport ticket to the alpine aport repository - https://gitlab.alpinelinux.org/alpine/aports/-/issues/15249
I've filed a backport ticket to the alpine aport repository - https://gitlab.alpinelinux.org/alpine/aports/-/issues/15249
@dasrecht thank you.
I suggest you highlight the CVE/vulnerability on the ticket otherwise it will be ignored.
Good point, added that to the ticket - Thanks
Added that - The answer is clear and in line with the alpine release schedules - `community repository is supported until next stable release`
So from an alpine standpoint, there's only the possibility to move the base image to 3.18. I am unsure what the stance of Mailu on this is.
I think it would also be worth looking into using the official ClamAV container image, which is maintained regularly - but I also understand that there might be considerations of not going with an official image.
Ok, i've looked a bit into this - It should be fairly easy to move to alpine 3.18 and a first short test shows that this would also work.
```
docker run -it clamav-test clamd --version
ClamAV 1.1.2
```
Will wrap that into a MR tomorrow morning
I am definitely in favour of going with upstream's image.
I do not consider switching the base image of the release to be an option we could backport.
The CVE here is about a DoS: something low risk that few people care about outside of the context of email gateways.
I agree on the low-risk CVE - I see it more as a problem that the version of ClamAV shipped with Alpine 3.17 is [EOL](https://blog.clamav.net/2023/07/2023-08-16-releases.html) - so whatever gets patched in newer versions will become an issue on the old release.
Is there a planned update of the base image at some point? As 3.17 is only getting security fixes.
If I find some time in the coming days I'll have a look if there's a way to use the official images. | 2023-10-05T16:58:03 |
|
Mailu/Mailu | 2,961 | Mailu__Mailu-2961 | [
"2959"
] | b71039572c2f49f6dc2c81a67cdc465473e752fd | diff --git a/core/base/libs/socrate/socrate/system.py b/core/base/libs/socrate/socrate/system.py
--- a/core/base/libs/socrate/socrate/system.py
+++ b/core/base/libs/socrate/socrate/system.py
@@ -66,7 +66,8 @@ def _is_compatible_with_hardened_malloc():
lines = f.readlines()
for line in lines:
# See #2764, we need vmovdqu
- if line.startswith('flags') and ' avx ' not in line:
+ # See #2959, we need vpunpckldq
+ if line.startswith('flags') and ' avx2 ' not in line:
return False
# See #2541
if line.startswith('Features') and ' lrcpc ' not in line:
@@ -79,9 +80,9 @@ def set_env(required_secrets=[], log_filters=[], log_file=None):
sys.stderr = LogFilter(sys.stderr, log_filters, log_file)
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", 'WARNING'))
- if 'LD_PRELOAD' in os.environ and not _is_compatible_with_hardened_malloc():
- log.warning('Disabling hardened-malloc on this CPU')
- del os.environ['LD_PRELOAD']
+ if not 'LD_PRELOAD' in os.environ and _is_compatible_with_hardened_malloc():
+ log.warning('Your CPU has Advanced Vector Extensions available, we recommend you enable hardened-malloc earlier in the boot process by adding LD_PRELOAD=/usr/lib/libhardened_malloc.so to your mailu.env')
+ os.environ['LD_PRELOAD'] = '/usr/lib/libhardened_malloc.so'
""" This will set all the environment variables and retains only the secrets we need """
if 'SECRET_KEY_FILE' in os.environ:
diff --git a/core/oletools/start.py b/core/oletools/start.py
new file mode 100755
--- /dev/null
+++ b/core/oletools/start.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+
+from socrate import system
+
+system.set_env()
+
+with open('/app/olefy.py') as olefy:
+ exec(olefy.read())
| 2.0.25 gives Restarting (132) issue in docker
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x ] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0.25`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
pulled the latest build (2.0.25) and the mailu docker containers would no longer start. They would go to a status of "Restarting (132)" and continuously just restart
## Replication Steps
<!--
Steps for replicating your issue
-->
## Observed behaviour
<!--
Explain or paste the result you received.
-->
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
Docker containers to start up and run
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
| same here.
how to revert to 2.0.24 while fixes are released ?
> same here.
> how to revert to 2.0.24 while fixes are released ?
Create an `.env` file in your Mailu directory with this content:
```
MAILU_VERSION=2.0.24
```
Then run:
```shell
docker compose down
docker compose pull
docker compose up -d
```
Your issue does not contain sufficient information for troubleshooting. It does not even contain logs.
This version was tested on x64, armv7 and arm64 hardware. I can also not replicate this on my environment.
Please provide logs and provide more information about the hardware you are running this on. Especially the cpu properties are interesting. You can get these via `cat /proc/cpuinfo`
@Diman0
My `/proc/cpuinfo` (from a single core):
```
processor : 0
vendor_id : AuthenticAMD
cpu family : 6
model : 6
model name : QEMU Virtual CPU version 2.5+
stepping : 3
microcode : 0x1000065
cpu MHz : 1996.247
cache size : 512 KB
physical id : 0
siblings : 2
core id : 0
cpu cores : 2
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 13
wp : yes
flags : fpu de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx lm nopl cpuid tsc_known_freq pni pclmulqdq ssse3 cx16 sse4_1 sse4_2 x2apic popcnt aes xsave rdrand hypervisor lahf_lm cmp_legacy abm 3dnowprefetch ssbd ibpb vmmcall arch_capabilities
bugs : fxsave_leak sysret_ss_attrs null_seg spectre_v1 spectre_v2 spec_store_bypass
bogomips : 3992.49
TLB size : 1024 4K pages
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual
power management:
```
For me, there are no logs. With `docker compose logs -f`, I only see:
```
mailu-antivirus-1 exited with code 132
mailu-webmail-1 exited with code 0
mailu-oletools-1 exited with code 0
mailu-imap-1 exited with code 132
mailu-smtp-1 exited with code 132
mailu-resolver-1 exited with code 132
mailu-admin-1 exited with code 132
mailu-front-1 exited with code 0
mailu-antispam-1 exited with code 132
mailu-webmail-1 exited with code 132
mailu-antivirus-1 exited with code 132
mailu-imap-1 exited with code 132
mailu-smtp-1 exited with code 132
mailu-oletools-1 exited with code 132
mailu-resolver-1 exited with code 132
mailu-admin-1 exited with code 132
mailu-antispam-1 exited with code 0
mailu-webmail-1 exited with code 132
mailu-antivirus-1 exited with code 132
mailu-imap-1 exited with code 132
mailu-oletools-1 exited with code 132
mailu-admin-1 exited with code 132
mailu-smtp-1 exited with code 132
mailu-antispam-1 exited with code 132
mailu-resolver-1 exited with code 132
mailu-imap-1 exited with code 132
mailu-webmail-1 exited with code 132
mailu-antivirus-1 exited with code 0
mailu-smtp-1 exited with code 132
mailu-antispam-1 exited with code 132
mailu-admin-1 exited with code 132
mailu-webmail-1 exited with code 132
mailu-oletools-1 exited with code 132
mailu-imap-1 exited with code 132
```
Does it work when you set an empty `LD_PRELOAD=` in your mailu.env file?
Almost, all containers except oletools start. oletools continues with exit code 132.
I just see that on 2.0.24, I have the following message in every container log (at the very beginning):
```
WARNING:root:Disabling hardened-malloc on this CPU
```
Might that help? @Diman0
It is probably due to the updated hardened malloc. It might need more modern hardware now.
`LD_PRELOAD=` should disable it due to
https://github.com/Mailu/Mailu/blob/b71039572c2f49f6dc2c81a67cdc465473e752fd/core/base/libs/socrate/socrate/system.py#L76C65-L76C65
but oletools has not been updated to use this method:
https://github.com/Mailu/Mailu/blob/b71039572c2f49f6dc2c81a67cdc465473e752fd/core/oletools/Dockerfile
So oletools must be updated to also use this method.
@Dennis14e
In QEMU what processor did you configure that QEMU must emulate? Hopefully I can replicate the issue when I configure the same CPU model.
A potential workaround is too pass all the CPU flags of the host processor to the VM.
For more info see:
https://www.qemu.org/docs/master/system/qemu-cpu-models.html#qemu-command-line
https://www.qemu.org/docs/master/system/qemu-cpu-models.html#libvirt-guest-xml
@Diman0
I don't configured qemu, I use a VM from an hosting provider. I don't know what real CPU is used.
Same problem here, and same as @Dennis14e: I am using a VM from an hosting provider, which I cannot control.
I have the same problem on a 'real' server; /proc/cpuinfo
`
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 156
model name : Intel(R) Pentium(R) Silver N6000 @ 1.10GHz
stepping : 0
microcode : 0x24000024
cpu MHz : 2688.031
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 27
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch cpuid_fault epb cat_l2 cdp_l2 ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust smep erms rdt_a rdseed smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req umip waitpkg gfni rdpid movdiri movdir64b md_clear flush_l1d arch_capabilities
vmx flags : vnmi preemption_timer posted_intr invvpid ept_x_only ept_ad flexpriority apicv tsc_offset vtpr mtf vapic ept vpid unrestricted_guest vapic_reg vid ple shadow_vmcs ept_mode_based_exec tsc_scaling usr_wait_pause
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs srbds mmio_stale_data
bogomips : 2227.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 156
model name : Intel(R) Pentium(R) Silver N6000 @ 1.10GHz
stepping : 0
microcode : 0x24000024
cpu MHz : 1100.000
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 27
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch cpuid_fault epb cat_l2 cdp_l2 ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust smep erms rdt_a rdseed smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req umip waitpkg gfni rdpid movdiri movdir64b md_clear flush_l1d arch_capabilities
vmx flags : vnmi preemption_timer posted_intr invvpid ept_x_only ept_ad flexpriority apicv tsc_offset vtpr mtf vapic ept vpid unrestricted_guest vapic_reg vid ple shadow_vmcs ept_mode_based_exec tsc_scaling usr_wait_pause
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs srbds mmio_stale_data
bogomips : 2227.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 156
model name : Intel(R) Pentium(R) Silver N6000 @ 1.10GHz
stepping : 0
microcode : 0x24000024
cpu MHz : 1100.000
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 27
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch cpuid_fault epb cat_l2 cdp_l2 ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust smep erms rdt_a rdseed smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req umip waitpkg gfni rdpid movdiri movdir64b md_clear flush_l1d arch_capabilities
vmx flags : vnmi preemption_timer posted_intr invvpid ept_x_only ept_ad flexpriority apicv tsc_offset vtpr mtf vapic ept vpid unrestricted_guest vapic_reg vid ple shadow_vmcs ept_mode_based_exec tsc_scaling usr_wait_pause
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs srbds mmio_stale_data
bogomips : 2227.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 156
model name : Intel(R) Pentium(R) Silver N6000 @ 1.10GHz
stepping : 0
microcode : 0x24000024
cpu MHz : 1100.000
cache size : 4096 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 6
initial apicid : 6
fpu : yes
fpu_exception : yes
cpuid level : 27
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch cpuid_fault epb cat_l2 cdp_l2 ssbd ibrs ibpb stibp ibrs_enhanced tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust smep erms rdt_a rdseed smap clflushopt clwb intel_pt sha_ni xsaveopt xsavec xgetbv1 xsaves split_lock_detect dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp hwp_pkg_req umip waitpkg gfni rdpid movdiri movdir64b md_clear flush_l1d arch_capabilities
vmx flags : vnmi preemption_timer posted_intr invvpid ept_x_only ept_ad flexpriority apicv tsc_offset vtpr mtf vapic ept vpid unrestricted_guest vapic_reg vid ple shadow_vmcs ept_mode_based_exec tsc_scaling usr_wait_pause
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs srbds mmio_stale_data
bogomips : 2227.20
clflush size : 64
cache_alignment : 64
address sizes : 39 bits physical, 48 bits virtual
power management:
` | 2023-10-06T09:53:59 |
|
Mailu/Mailu | 2,972 | Mailu__Mailu-2972 | [
"2976"
] | d21eb8bea85c55136af8bc4e33c34d998215f348 | diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -75,6 +75,8 @@
'API': False,
'WEB_API': '/api',
'API_TOKEN': None,
+ 'FULL_TEXT_SEARCH': 'en',
+ 'FULL_TEXT_SEARCH_ATTACHMENTS': False,
'LOG_LEVEL': 'INFO',
'SESSION_KEY_BITS': 128,
'SESSION_TIMEOUT': 3600,
| FTS doesn't perform OCR on attachments for keyword extraction
If one send a PDF composed of images, no keywords will be extracted/indexed.
To fix that, we could do some OCR:
- https://github.com/tesseract-ocr/tesseract
or if we want to get fancy, something like
- https://github.com/JaidedAI/EasyOCR
Or the built-in integration with
- https://tika.apache.org/
There is a security argument going for that option: it's written in java (memory safe language) and would remove the need for decode2text and friends.
This is a follow-up to #2184 and #2972
| 2023-10-09T15:18:08 |
||
Mailu/Mailu | 2,975 | Mailu__Mailu-2975 | [
"2937"
] | 118a91256cd486111f2e7e938e6e2202de7f2f47 | diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -16,7 +16,8 @@
config_files = []
for rspamd_file in glob.glob("/conf/*"):
conf.jinja(rspamd_file, env, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
- config_files.append(os.path.basename(rspamd_file))
+ if rspamd_file != '/conf/forbidden_file_extension.map':
+ config_files.append(os.path.basename(rspamd_file))
for override_file in glob.glob("/overrides/*"):
if os.path.basename(override_file) not in config_files:
| Overriding rspamd forbidden_file_extension.map doesn't work as documented
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
The host is Ubuntu 22.04
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0`
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
My forbidden_file_extension.map placed in `overrides/rspamd/` is ignored and the default file is used instead.
## Replication Steps
<!--
Steps for replicating your issue
-->
With a Mailu 2.0 instance, follow the [instructions](https://mailu.io/2.0/antispam.html#can-i-change-the-list-of-authorized-file-attachments) for changing allowed attachments. Change `overrides/rspamd/forbidden_file_extension.map` and add/remove some lines, then run `docker compose restart antispam`. Re-run the `docker compose exec antispam cat /etc/rspamd/local.d/forbidden_file_extension.map` command to see the file used by rspamd.
## Observed behaviour
<!--
Explain or paste the result you received.
-->
I get the output of `core/rspamd/conf/forbidden_file_extension.map` from the Mailu distribution. Indeed, attachments allowed are still in line with this file, rather than the new changed file.
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
I'd expect the modified forbidden_file_extension.map in the `overrides/rspamd` directory is taken into account. For instance, if I remove js from the list, I should be able to send an attachment with the js extension.
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker compose `docker compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
## My diagnosis
The overridden file is available at `/overrides/forbidden_file_extension.map` in the antispam container, as expected. However, `core/rspamd/start.py` adds `forbidden_file_extension.map` to the `config_files` variable so the override file at `/overrides/forbidden_file_extension.map` is ignored and not copied over. The start script runs Jinja on `/conf/forbidden_file_extension.map` but since there's no directives it's just copied verbatim to `/etc/rspamd/local.d/`. This default file is what rspamd reads. The `multimap.conf` file references the map file, but that has no directive for overriding the path of the map file so overriding `multimap.conf` doesn't help.
## Possible solution
To get the expected behaviour (my override file is copied to `/etc/rspamd/local.d/` instead of the default file), I suspect the start script needs to be altered. I'm unsure how this was intended to be done originally, but I'd probably modify the start script so an administrator can specify whether certain override configuration files should be copied despite being on the `config_files` list. A pleasant way to still keep Jinja flexibility is for the copied override configuration files to be processed as templates too. For instance I could copy the `multimap.conf` template to a file in the overrides directory, change it and let the start script process the new template. This is nicely used in Kolla Ansible for instance. If it's something relatively straightforward like this I'd be happy to make a PR for it, but might take some time since it's my first issue/PR.
| This is indeed a defect. Those instructions will not work. You are correct that original configuration files are not overridden. This is by design. They should not be overwritten. For all *.conf files the rspamd native import mechanism is used.
`forbidden_file_extension.map` is an exception to the rule that this file must have the possibility to be overridden.
It is sufficient to create an exception for this map in start.py file that it can be overridden.
I will create a fix for this. | 2023-10-10T08:21:41 |
|
Mailu/Mailu | 2,979 | Mailu__Mailu-2979 | [
"2824"
] | d21eb8bea85c55136af8bc4e33c34d998215f348 | diff --git a/core/admin/mailu/api/v1/user.py b/core/admin/mailu/api/v1/user.py
--- a/core/admin/mailu/api/v1/user.py
+++ b/core/admin/mailu/api/v1/user.py
@@ -14,6 +14,7 @@
'password': fields.String(description="Hash of the user's password; Example='$bcrypt-sha256$v=2,t=2b,r=12$fmsAdJbYAD1gGQIE5nfJq.$zLkQUEs2XZfTpAEpcix/1k5UTNPm0jO'"),
'comment': fields.String(description='A description for the user. This description is shown on the Users page', example='my comment'),
'quota_bytes': fields.Integer(description='The maximum quota for the userβs email box in bytes', example='1000000000'),
+ 'quota_bytes_used': fields.Integer(description='The size of the userβs email box in bytes', example='5000000'),
'global_admin': fields.Boolean(description='Make the user a global administrator'),
'enabled': fields.Boolean(description='Enable the user. When an user is disabled, the user is unable to login to the Admin GUI or webmail or access his email via IMAP/POP3 or send mail'),
'change_pw_next_login': fields.Boolean(description='Force the user to change their password at next login'),
| Add quota_bytes_used on UserGet object API
## Environment & Version
### Environment
- [x] docker compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: `2.0`
## Description
Hello,
Can you add quota_bytes_used to the UserGet object in the API?
| 2023-10-11T15:56:35 |
||
Mailu/Mailu | 2,982 | Mailu__Mailu-2982 | [
"2059"
] | bc6a38b655a4b90a0e243d1feec99cfea0ade954 | diff --git a/optional/clamav/start.py b/optional/clamav/start.py
deleted file mode 100755
--- a/optional/clamav/start.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import logging as logger
-import sys
-from socrate import system
-
-system.set_env(log_filters=r'SelfCheck: Database status OK\.$')
-
-# Bootstrap the database if clamav is running for the first time
-if not os.path.isfile("/data/main.cvd"):
- logger.info("Starting primary virus DB download")
- os.system("freshclam")
-
-# Run the update daemon
-logger.info("Starting the update daemon")
-os.system("freshclam -d -c 6")
-
-# Run clamav
-logger.info("Starting clamav")
-os.system("clamd")
| diff --git a/.github/workflows/build_test_deploy.yml b/.github/workflows/build_test_deploy.yml
--- a/.github/workflows/build_test_deploy.yml
+++ b/.github/workflows/build_test_deploy.yml
@@ -474,7 +474,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- target: ["setup", "docs", "fetchmail", "webmail", "admin", "traefik-certdumper", "radicale", "clamav", "rspamd", "oletools", "postfix", "dovecot", "unbound", "nginx"]
+ target: ["setup", "docs", "fetchmail", "webmail", "admin", "traefik-certdumper", "radicale", "rspamd", "oletools", "postfix", "dovecot", "unbound", "nginx"]
steps:
- uses: actions/checkout@v3
- name: Retrieve global variables
diff --git a/tests/build-ci.hcl b/tests/build-ci.hcl
--- a/tests/build-ci.hcl
+++ b/tests/build-ci.hcl
@@ -49,7 +49,6 @@ group "default" {
"webmail",
- "antivirus",
"fetchmail",
"resolver",
"traefik-certdumper",
@@ -207,15 +206,6 @@ target "webmail" {
# -----------------------------------------------------------------------------------------
# Optional images
# -----------------------------------------------------------------------------------------
-target "antivirus" {
- inherits = ["defaults"]
- context = "optional/clamav/"
- contexts = {
- base = "docker-image://${DOCKER_ORG}/base:${MAILU_VERSION}"
- }
- tags = tag("clamav")
-}
-
target "fetchmail" {
inherits = ["defaults"]
context = "optional/fetchmail/"
diff --git a/tests/build.hcl b/tests/build.hcl
--- a/tests/build.hcl
+++ b/tests/build.hcl
@@ -45,7 +45,6 @@ group "default" {
"webmail",
- "antivirus",
"fetchmail",
"resolver",
"traefik-certdumper",
@@ -201,15 +200,6 @@ target "webmail" {
# -----------------------------------------------------------------------------------------
# Optional images
# -----------------------------------------------------------------------------------------
-target "antivirus" {
- inherits = ["defaults"]
- context = "optional/clamav/"
- contexts = {
- base = "target:base"
- }
- tags = tag("clamav")
-}
-
target "fetchmail" {
inherits = ["defaults"]
context = "optional/fetchmail/"
diff --git a/tests/compose/filters/docker-compose.yml b/tests/compose/filters/docker-compose.yml
--- a/tests/compose/filters/docker-compose.yml
+++ b/tests/compose/filters/docker-compose.yml
@@ -70,7 +70,7 @@ services:
hostname: oletools
restart: always
networks:
- - noinet
+ - oletools
antispam:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-local}
@@ -78,7 +78,8 @@ services:
env_file: mailu.env
networks:
- default
- - noinet
+ - oletools
+ - clamav
volumes:
- "/mailu/filter:/var/lib/rspamd"
- "/mailu/dkim:/dkim"
@@ -88,11 +89,22 @@ services:
# Optional services
antivirus:
- image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}clamav:${MAILU_VERSION:-local}
+ image: clamav/clamav-debian:1.2.0-6
restart: always
- env_file: mailu.env
+ logging:
+ driver: journald
+ options:
+ tag: mailu-clamav
+ networks:
+ - clamav
volumes:
- - "/mailu/filter:/data"
+ - "/mailu/filter/clamav:/var/lib/clamav"
+ healthcheck:
+ test: ["CMD-SHELL", "kill -0 `cat /tmp/clamd.pid` && kill -0 `cat /tmp/freshclam.pid`"]
+ interval: 10s
+ timeout: 5s
+ retries: 3
+ start_period: 10s
resolver:
image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-local}
@@ -112,6 +124,8 @@ networks:
driver: default
config:
- subnet: 192.168.203.0/24
- noinet:
+ clamav:
+ driver: bridge
+ oletools:
driver: bridge
internal: true
| Use official clamav docker image for Mailu clamav image
With https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future?
| It might be worthwhile to have a deeper look at using their image. However a quick look already shows a couple of issues
- no TZData package (timezone settings)
- No proper healthcheck
But this can be addressed in our Dockerfile which will use the clamav docker image of course.
I see they also offer pinned versions. That means we can use it.
@diman0, Hi
as the original author of the docker container, can you point out the issues in more detail? I'm sure we can discuss and integrate the needed things.
No TZData package, that is of course trivial to add. I'm no clamav expert, but is it used by clamav if available? If so, we could and should of course include it.
No proper healthcheck is surprising, because afaik it was inspired by the healthcheck in mailu :p (but this was in 2019 so it may have changed significantly). Getting a better healthcheck in is of course only better than a poor healthcheck. But how is the current healthcheck not proper in your eyes?
Edit: I've opened a merge request to clamav's repo to delay the healthcheck on startup and add the tzdata package. Thanks for that @Diman0. Happy to address any other issues in the container if need be.
I see the Dockerfile does contain a HEALTHCHECK, but it is in the wrong location.
(https://github.com/Cisco-Talos/clamav/blob/679883fdb6b123ac534d5e7c27c902e04cbfa711/Dockerfile#L14
If this is moved below the second FROM (line 90), then the docker image will have a working healthcheck.
But that is actually not required for inclusion with Mailu. We would also have to define this in our Dockerfile.
@diman0, thank you for pointing out these oversights. I'm fairly certain it worked at some point :) Docker improvement or broken Dockerfile for a long time. Either or; they are addressed. | 2023-10-13T14:26:10 |
Mailu/Mailu | 2,990 | Mailu__Mailu-2990 | [
"2966"
] | d0d4876a85f59360d43b06634da6429bfe492240 | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -29,7 +29,7 @@ def test_unsupported():
import codecs
if os.path.isfile(codecs.decode('/.qbpxrerai', 'rot13')) or os.environ.get(codecs.decode('V_XABJ_ZL_FRGHC_QBRFAG_SVG_ERDHVERZRAGF_NAQ_JBAG_SVYR_VFFHRF_JVGUBHG_CNGPURF', 'rot13'), None):
return
- print('Your system is not supported. Please start by reading the documentation and then http://www.catb.org/~esr/faqs/smart-questions.html')
+ log.critical('Your system is not supported. Please start by reading the documentation and then http://www.catb.org/~esr/faqs/smart-questions.html')
while True:
time.sleep(5)
diff --git a/core/nginx/letsencrypt.py b/core/nginx/letsencrypt.py
--- a/core/nginx/letsencrypt.py
+++ b/core/nginx/letsencrypt.py
@@ -61,13 +61,13 @@ def serve_one_request():
# Run certbot every day
while True:
while True:
- hostname = os.environ['HOSTNAMES'].split(' ')[0]
+ hostname = os.environ['HOSTNAMES'].split(',')[0]
target = f'http://{hostname}/.well-known/acme-challenge/testing'
thread = Thread(target=serve_one_request)
thread.start()
r = requests.get(target)
if r.status_code != 204:
- log.error(f"Can't reach {target}!, please ensure it's fixed or change the TLS_FLAVOR.")
+ log.critical(f"Can't reach {target}!, please ensure it's fixed or change the TLS_FLAVOR.")
time.sleep(5)
else:
break
| mail-letsencrypt -> front --> Invalid response from http://mail.example.fr/
## Environment & Version
### Environment
- [*] docker compose
### Version
- Version: `master` 2.0
## Description
After setting up Mailu behind a reverse proxy with the mail-letsencrypt TLS setting, I cannot renew my certificates.
After checking the logs for the front container, I get Error:
```
Certbot failed to authenticate some domains (authenticator: standalone). The Certificate Authority reported these problems:
Domain: mail.example.fr
Type: unauthorized
Detail: XXX.XXX.XXX.XXX: Invalid response from http://mail.example.fr/.well-known/acme-challenge/jw_V7ZveSz5gp2Gypw6et4LczGynp8veedfJ5f4-AF4: 404
```
## Replication Steps
1. Rebuild Mailu using the docker-compose settings below
2. Try to add an account in my client => Certificate is expired
## Expected behaviour
Certificate renewal
## Logs
```
2023-10-06 13:55:02,423:DEBUG:acme.client:Received response:
HTTP 200
Server: nginx
Date: Fri, 06 Oct 2023 13:55:02 GMT
Content-Type: application/json
Content-Length: 1026
Connection: keep-alive
Boulder-Requester: 405748950
Cache-Control: public, max-age=0, no-cache
Link: <https://acme-v02.api.letsencrypt.org/directory>;rel="index"
Replay-Nonce: 3hclikJOYQeRqfKi-jIPScoJJR3C-lh4lt7atk0b2yydJ2qurDI
X-Frame-Options: DENY
Strict-Transport-Security: max-age=604800
{
"identifier": {
"type": "dns",
"value": "mail.example.fr"
},
"status": "invalid",
"expires": "2023-10-13T13:54:59Z",
"challenges": [
{
"type": "http-01",
"status": "invalid",
"error": {
"type": "urn:ietf:params:acme:error:unauthorized",
"detail": "XXX.XXX.XXX.XXX: Invalid response from http://mail.example.fr/.well-known/acme-challenge/jw_V7ZveSz5gp2Gypw6et4LczGynp8veedfJ5f4-AF4: 404",
"status": 403
},
"url": "https://acme-v02.api.letsencrypt.org/acme/chall-v3/271292522506/NQsvZw",
"token": "jw_V7ZveSz5gp2Gypw6et4LczGynp8veedfJ5f4-AF4",
"validationRecord": [
{
"url": "http://mail.example.fr/.well-known/acme-challenge/jw_V7ZveSz5gp2Gypw6et4LczGynp8veedfJ5f4-AF4",
"hostname": "mail.example.fr",
"port": "80",
"addressesResolved": [
"XXX.XXX.XXX.XXX"
],
"addressUsed": "XXX.XXX.XXX.XXX"
}
],
"validated": "2023-10-06T13:55:00Z"
}
]
}
2023-10-06 13:55:02,424:DEBUG:acme.client:Storing nonce: 3hclikJOYQeRqfKi-jIPScoJJR3C-lh4lt7atk0b2yydJ2qurDI
2023-10-06 13:55:02,424:INFO:certbot._internal.auth_handler:Challenge failed for domain mail.example.fr
```
## Configuration files
### mailu.env:
```
# Mailu main configuration file
#
# This file is autogenerated by the configuration management wizard for compose flavor.
# For a detailed list of configuration variables, see the documentation at
# https://mailu.io
###################################
# Common configuration variables
###################################
# Set to a randomly generated 16 bytes string
SECRET_KEY=REDACTED4PRIVACY
# Subnet of the docker network. This should not conflict with any networks to which your system is connected. (Internal and external!)
SUBNET=192.168.203.0/24
# Main mail domain
DOMAIN=2rock.fr
# Hostnames for this server, separated with comas
HOSTNAMES=mail.example.fr,autoconfig.example.fr,autoconfig.example.fr
# Postmaster local part (will append the main mail domain)
POSTMASTER=admin
# Choose how secure connections will behave (value: letsencrypt, cert, notls, mail, mail-letsencrypt)
TLS_FLAVOR=mail-letsencrypt
# Authentication rate limit per IP (per /24 on ipv4 and /48 on ipv6)
AUTH_RATELIMIT_IP=5/hour
# Authentication rate limit per user (regardless of the source-IP)
AUTH_RATELIMIT_USER=10/day
#
AUTH_RATELIMIT_EXEMPTION_LENGTH=86400
# Opt-out of statistics, replace with "True" to opt out
DISABLE_STATISTICS=True
###################################
# Optional features
###################################
# Expose the admin interface (value: true, false)
ADMIN=true
# Choose which webmail to run if any (values: roundcube, snappymail, none)
WEBMAIL=roundcube
# Expose the API interface (value: true, false)
API=true
# Dav server implementation (value: radicale, none)
WEBDAV=radicale
# Antivirus solution (value: clamav, none)
ANTIVIRUS=clamav
# Scan Macros solution (value: true, false)
SCAN_MACROS=true
###################################
# Mail settings
###################################
# Message size limit in bytes
# Default: accept messages up to 50MB
# Max attachment size will be 33% smaller
MESSAGE_SIZE_LIMIT=50000000
# Message rate limit (per user)
MESSAGE_RATELIMIT=200/day
# Networks granted relay permissions
# Use this with care, all hosts in this networks will be able to send mail without authentication!
RELAYNETS=
# Will relay all outgoing mails if configured
RELAYHOST=
# Enable fetchmail
FETCHMAIL_ENABLED=true
# Fetchmail delay
FETCHMAIL_DELAY=600
# Recipient delimiter, character used to delimiter localpart from custom address part
RECIPIENT_DELIMITER=+
# DMARC rua and ruf email
DMARC_RUA=admin
DMARC_RUF=admin
# Welcome email, enable and set a topic and body if you wish to send welcome
# emails to all users.
WELCOME=false
WELCOME_SUBJECT=Welcome to your new email account
WELCOME_BODY=Welcome to your new email account, if you can read this, then it is configured properly!
# Maildir Compression
# choose compression-method, default: none (value: gz, bz2, zstd)
COMPRESSION=bz2
# change compression-level, default: 6 (value: 1-9)
COMPRESSION_LEVEL=6
# IMAP full-text search is enabled by default. Set the following variable to off in order to disable the feature.
# FULL_TEXT_SEARCH=off
###################################
# Web settings
###################################
# Path to redirect / to
WEBROOT_REDIRECT=/webmail
# Path to the admin interface if enabled
WEB_ADMIN=/admin
# Path to the webmail if enabled
WEB_WEBMAIL=/webmail
# Path to the API interface if enabled
WEB_API=/api
# Website name
SITENAME=Mailu
# Linked Website URL
WEBSITE=https://example.Fr
###################################
# Advanced settings
###################################
# Docker-compose project name, this will prepended to containers names.
COMPOSE_PROJECT_NAME=mailu
# Number of rounds used by the password hashing scheme
CREDENTIAL_ROUNDS=12
# Header to take the real ip from
REAL_IP_HEADER=X-Real-IP
# IPs for nginx set_real_ip_from (CIDR list separated by commas)
REAL_IP_FROM=10.0.0.3
# choose wether mailu bounces (no) or rejects (yes) mail when recipient is unknown (value: yes, no)
REJECT_UNLISTED_RECIPIENT=
# Log level threshold in start.py (value: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)
LOG_LEVEL=INFO
# Timezone for the Mailu containers. See this link for all possible values https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TZ=Etc/UTC
# Default spam threshold used for new users
DEFAULT_SPAM_THRESHOLD=80
# API token required for authenticating to the RESTful API.
# This is a mandatory setting for using the RESTful API.
API_TOKEN=REDACTED4PRIVACY
```
### docker-compose.yml:
```
*# This file is auto-generated by the Mailu configuration wizard.
# Please read the documentation before attempting any change.
# Generated for compose flavor
version: '2.2'
services:
# External dependencies
redis:
image: redis:alpine
restart: always
volumes:
- "/opt/mailu/redis:/data"
depends_on:
- resolver
dns:
- 192.168.203.254
# Core services
front:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
logging:
driver: journald
options:
tag: mailu-front
ports:
- "10.0.0.5:80:80"
- "10.0.0.5:443:443"
- "10.0.0.5:25:25"
- "10.0.0.5:465:465"
- "10.0.0.5:587:587"
- "10.0.0.5:110:110"
- "10.0.0.5:995:995"
- "10.0.0.5:143:143"
- "10.0.0.5:993:993"
networks:
- default
- webmail
- radicale
volumes:
- "/opt/mailu/certs:/certs"
- "/opt/mailu/overrides/nginx:/overrides:ro"
depends_on:
- resolver
dns:
- 192.168.203.254
resolver:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-2.0}
env_file: mailu.env
restart: always
networks:
default:
ipv4_address: 192.168.203.254
admin:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
logging:
driver: journald
options:
tag: mailu-admin
volumes:
- "/opt/mailu/data:/data"
- "/opt/mailu/dkim:/dkim"
depends_on:
- redis
- resolver
dns:
- 192.168.203.254
imap:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}dovecot:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
logging:
driver: journald
options:
tag: mailu-imap
volumes:
- "/opt/mailu/mail:/mail"
- "/opt/mailu/overrides/dovecot:/overrides:ro"
depends_on:
- front
- resolver
dns:
- 192.168.203.254
smtp:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}postfix:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
logging:
driver: journald
options:
tag: mailu-smtp
volumes:
- "/opt/mailu/mailqueue:/queue"
- "/opt/mailu/overrides/postfix:/overrides:ro"
depends_on:
- front
- resolver
dns:
- 192.168.203.254
oletools:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}oletools:${MAILU_VERSION:-2.0}
hostname: oletools
restart: always
networks:
- noinet
depends_on:
- resolver
dns:
- 192.168.203.254
antispam:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-2.0}
hostname: antispam
restart: always
env_file: mailu.env
logging:
driver: journald
options:
tag: mailu-antispam
networks:
- default
- noinet
volumes:
- "/opt/mailu/filter:/var/lib/rspamd"
- "/opt/mailu/overrides/rspamd:/overrides:ro"
depends_on:
- front
- redis
- oletools
- antivirus
- resolver
dns:
- 192.168.203.254
# Optional services
antivirus:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}clamav:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
volumes:
- "/opt/mailu/filter:/data"
depends_on:
- resolver
dns:
- 192.168.203.254
webdav:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}radicale:${MAILU_VERSION:-2.0}
restart: always
volumes:
- "/opt/mailu/dav:/data"
networks:
- radicale
fetchmail:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}fetchmail:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
volumes:
- "/opt/mailu/data/fetchmail:/data"
depends_on:
- admin
- smtp
- imap
- resolver
dns:
- 192.168.203.254
# Webmail
webmail:
image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}webmail:${MAILU_VERSION:-2.0}
restart: always
env_file: mailu.env
volumes:
- "/opt/mailu/webmail:/data"
- "/opt/mailu/overrides/roundcube:/overrides:ro"
networks:
- webmail
depends_on:
- front
networks:
default:
driver: bridge
ipam:
driver: default
config:
- subnet: 192.168.203.0/24
radicale:
driver: bridge
webmail:
driver: bridge
noinet:
driver: bridge
internal: true
```
### nginx reverse proxy configuration mail.example.fr.conf
```
server {
server_name mail.example.fr;
listen 80;
listen [::]:80;
location ^~ /.well-known/acme-challenge/ {
#alias /var/www/html/mail.example.fr/.well-know;
proxy_pass http://10.0.0.5/.well-known/acme-challenge/;
proxy_set_header Host $host;
}
location / {
return 301 https://$host$request_uri;
}
}
server {
server_name mail.example.fr;
listen 443 ssl http2;
listen [::]:443 ssl http2;
location / {
rewrite / https://mail.example.fr/webmail redirect;
}
location ~* ^/(admin|api|sso|static|webdav|webmail|(apple\.)?mobileconfig|(\.well\-known/autoconfig/)?mail/|Autodiscover/Autodiscover) {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass https://10.0.0.5;
}
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_headers_hash_max_size 512;
proxy_headers_hash_bucket_size 128;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload";
client_max_body_size 0;
ssl_certificate /etc/letsencrypt/live/mail.example.fr/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/mail.example.fr/privkey.pem;
ssl_trusted_certificate /etc/letsencrypt/live/mail.example.fr/chain.pem;
include /etc/nginx/snippets/header.conf;
include /etc/nginx/snippets/ssl.conf;
access_log /var/log/nginx/mail.example.fr.access.log;
error_log /var/log/nginx/mail.example.fr.error.log;
}
```
### DNS configuration

| It's most likely your reverse proxy's configuration.
You have a single ``server_name`` defined for each block whereas you have multiple in ``HOSTNAMES`` and ``DOMAIN``. Add the missing ones and it should start working.
If it doesn't, use tcpdump and try to find out how far the request on port 80 goes: is it going past your reverse proxy? is it going past front and to port 8008 like it ought to? | 2023-10-17T12:00:00 |
|
Mailu/Mailu | 3,011 | Mailu__Mailu-3011 | [
"2919"
] | aa25d716d66fe4c932cdea6cf709d1bf939cbf97 | diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -27,7 +27,7 @@
def test_unsupported():
import codecs
- if os.path.isfile(codecs.decode('/.qbpxrerai', 'rot13')) or os.environ.get(codecs.decode('V_XABJ_ZL_FRGHC_QBRFAG_SVG_ERDHVERZRAGF_NAQ_JBAG_SVYR_VFFHRF_JVGUBHG_CNGPURF', 'rot13'), None):
+ if os.path.isfile(codecs.decode('/.qbpxrerai', 'rot13')) or os.environ.get(codecs.decode('V_XABJ_ZL_FRGHC_QBRFAG_SVG_ERDHVERZRAGF_NAQ_JBAG_SVYR_VFFHRF_JVGUBHG_CNGPURF', 'rot13'), None) or os.environ.get(codecs.decode('ZNVYH_URYZ_PUNEG', 'rot13'), None):
return
print('Your system is not supported. Please start by reading the documentation and then http://www.catb.org/~esr/faqs/smart-questions.html')
while True:
diff --git a/core/rspamd/start.py b/core/rspamd/start.py
--- a/core/rspamd/start.py
+++ b/core/rspamd/start.py
@@ -38,4 +38,4 @@
os.system("mkdir -m 755 -p /run/rspamd")
os.system("chown rspamd:rspamd /run/rspamd")
os.system("find /var/lib/rspamd | grep -v /filter | xargs -n1 chown rspamd:rspamd")
-os.execv("/usr/sbin/rspamd", ["rspamd", "-f", "-u", "rspamd", "-g", "rspamd"])
+os.execv("/usr/bin/rspamd", ["rspamd", "-f", "-u", "rspamd", "-g", "rspamd"])
| diff --git a/tests/compose/filters/docker-compose.yml b/tests/compose/filters/docker-compose.yml
--- a/tests/compose/filters/docker-compose.yml
+++ b/tests/compose/filters/docker-compose.yml
@@ -85,6 +85,8 @@ services:
- "/mailu/overrides/rspamd:/etc/rspamd/override.d"
depends_on:
- front
+ - antivirus
+ - oletools
# Optional services
antivirus:
| Vulnerability in ClamAV
## Environment & Version
### Environment
- [ ] docker compose
- [x] kubernetes
- [ ] docker swarm
### Version
- Version: `master`
## Description
ClamAV version 0.105.2 is vulnerable to [CVE-2023-20197](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-20197)
Unfortunately, ClamAV will not provide any update in the 0.105 branch, as it is EOL see https://blog.clamav.net/2023/07/2023-08-16-releases.html
## Replication Steps
```
$ docker run --pull=always --rm -it ghcr.io/mailu/clamav:master clamd --version
master: Pulling from mailu/clamav
Digest: sha256:dd088fc80ab063b0588160a69fce034d5d1f33db6d85d57296154fc51cdeaffa
Status: Image is up to date for ghcr.io/mailu/clamav:master
ClamAV 0.105.2
```
## Observed behaviour
ClamAV is in a vulnerable state
## Expected behaviour
I expect ClamAV to be updated to a fixed version (1.1.1 or 1.0.2)
| It looks like the base image should be updated to Alpine 3.18. As seen [here](https://pkgs.alpinelinux.org/packages?name=clamav&branch=v3.18&repo=&arch=&maintainer=), it allows to install ClamAV 1.1.0
Yeah well for master the fix is quite straightforward... I'm not sure we want to backport a switch of the base image though ;)
So how do we progress this? Has someone filled in a ticket with Alpine so that we get the fix backported to 3.17?
> Yeah well for master the fix is quite straightforward... I'm not sure we want to backport a switch of the base image though ;)
Yes, I agree to that. Updating Alpine in the base might also bump the versions of other installed packages.
> So how do we progress this? Has someone filled in a ticket with Alpine so that we get the fix backported to 3.17?
I'm not sure what the policy of Alpine is about backporting this. @cpernot-hf mentioned ClamAV 0.105 is EOL, so the backport will mean a completely different version.
Maybe `docker sbom` could provide some insights in all of the installed packages on the Mailu container images. This could then be compared with the versions of Alpine 3.18 to get a better overview of the changes and if it might be feasible to update the Alpine version non the less.
3.17 is "stable" and security-supported until end of 2024 according to https://alpinelinux.org/releases/
My suggestion is to log a ticket with them and see what they say. They may be able to backport just the fix or the full package.
I've filed a backport ticket to the alpine aport repository - https://gitlab.alpinelinux.org/alpine/aports/-/issues/15249
I've filed a backport ticket to the alpine aport repository - https://gitlab.alpinelinux.org/alpine/aports/-/issues/15249
@dasrecht thank you.
I suggest you highlight the CVE/vulnerability on the ticket otherwise it will be ignored.
Good point, added that to the ticket - Thanks
Added that - The answer is clear and in line with the alpine release schedules - `community repository is supported until next stable release`
So from an alpine standpoint, there's only the possibility to move the base image to 3.18. I am unsure what the stance of Mailu on this is.
I think it would also be worth looking into using the official ClamAV container image, which is maintained regularly - but I also understand that there might be considerations of not going with an official image.
Ok, i've looked a bit into this - It should be fairly easy to move to alpine 3.18 and a first short test shows that this would also work.
```
docker run -it clamav-test clamd --version
ClamAV 1.1.2
```
Will wrap that into a MR tomorrow morning
I am definitely in favour of going with upstream's image.
I do not consider switching the base image of the release to be an option we could backport.
The CVE here is about a DoS: something low risk that few people care about outside of the context of email gateways.
I agree on the low-risk CVE - I see it more as a problem that the version of ClamAV shipped with Alpine 3.17 is [EOL](https://blog.clamav.net/2023/07/2023-08-16-releases.html) - so whatever gets patched in newer versions will become an issue on the old release.
Is there a planned update of the base image at some point? As 3.17 is only getting security fixes.
If I find some time in the coming days I'll have a look if there's a way to use the official images. | 2023-10-28T07:44:11 |
Mailu/Mailu | 3,023 | Mailu__Mailu-3023 | [
"2618"
] | 4e351e1dd41955fd14299768957f95fbe8d675b8 | diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py
--- a/core/admin/mailu/models.py
+++ b/core/admin/mailu/models.py
@@ -232,9 +232,7 @@ def dns_dkim(self):
""" return DKIM record for domain """
if self.dkim_key:
selector = app.config['DKIM_SELECTOR']
- txt = f'v=DKIM1; k=rsa; p={self.dkim_publickey}'
- record = ' '.join(f'"{txt[p:p+250]}"' for p in range(0, len(txt), 250))
- return f'{selector}._domainkey.{self.name}. 600 IN TXT {record}'
+ return f'{selector}._domainkey.{self.name}. 600 IN TXT "v=DKIM1; k=rsa; p={self.dkim_publickey}"'
@cached_property
def dns_dmarc(self):
diff --git a/core/admin/mailu/ui/views/domains.py b/core/admin/mailu/ui/views/domains.py
--- a/core/admin/mailu/ui/views/domains.py
+++ b/core/admin/mailu/ui/views/domains.py
@@ -70,6 +70,27 @@ def domain_details(domain_name):
domain = models.Domain.query.get(domain_name) or flask.abort(404)
return flask.render_template('domain/details.html', domain=domain)
[email protected]('/domain/details/<domain_name>/zonefile', methods=['GET'])
[email protected]_admin(models.Domain, 'domain_name')
+def domain_download_zonefile(domain_name):
+ domain = models.Domain.query.get(domain_name) or flask.abort(404)
+ res = [domain.dns_mx, domain.dns_spf]
+ if domain.dkim_publickey:
+ record = domain.dns_dkim.split('"', 1)[0].strip()
+ txt = f'v=DKIM1; k=rsa; p={domain.dkim_publickey}'
+ txt = ' '.join(f'"{txt[p:p+250]}"' for p in range(0, len(txt), 250))
+ res.append(f'{record} {txt}')
+ res.append(domain.dns_dmarc)
+ if domain.dns_tlsa:
+ res.append(domain.dns_tlsa)
+ res.extend(domain.dns_autoconfig)
+ res.append("")
+ return flask.Response(
+ "\n".join(res),
+ content_type="text/plain",
+ headers={"Content-disposition": f"attachment; filename={domain.name}-zonefile.txt"}
+ )
+
@ui.route('/domain/genkeys/<domain_name>', methods=['GET', 'POST'])
@access.domain_admin(models.Domain, 'domain_name')
| DNS DKIM entry is broken up in two parts (for 2048b keys), making it error-prone
<!--
Thank you for opening an issue with Mailu. Please understand that issues are meant for bugs and enhancement-requests.
For **user-support questions**, reach out to us on [matrix](https://matrix.to/#/#mailu:tedomum.net).
To be able to help you best, we need some more information.
Before you open your issue
- Check if no issue or pull-request for this already exists.
- Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- You understand `Mailu` is made by volunteers in their **free time** β be concise, civil and accept that delays can occur.
- The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
Please put your text outside of the comment blocks to be visible. You can use the button "Preview" above to check.
-->
## Environment & Version
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Version
- Version: 1.9
<!--
To find your version, get the image name of a mailu container and read the version from the tag (example for version 1.7).
$> docker ps -a | grep mailu
140b09d4b09c mailu/roundcube:1.7 "docker-php-entrypoiβ¦" 2 weeks ago Up 2 days (healthy) 80/tcp
$> grep MAILU_VERSION docker-compose.yml mailu.env
-->
## Description
<!--
Further explain the bug in a few words. It should be clear what the unexpected behaviour is. Share it in an easy-to-understand language.
-->
DNS DKIM entry in mailu-admin is broken into two parts for 2048b keys.
## Replication Steps
<!--
Steps for replicating your issue
-->
1. Set up a mail domain
2. Generate keys in admin
3. Observe DNS DKIM entry
## Observed behaviour
<!--
Explain or paste the result you received.
-->
```
[...] 600 IN TXT "v=DKIM1; k=rsa; p=[.....]" "[......]"
```
(the " " in between is hard to notice, and easily makes people just copy that into the dns configuration, rendering the dkim entry invalid)
## Expected behaviour
<!--
Explain what results you expected - be as specific as possible.
Just saying "it doesnβt work as expected" is not useful. It's also helpful to describe what you actually experienced.
-->
```
[...] 600 IN TXT "v=DKIM1; k=rsa; p=[.....][......]"
```
(shouldn't be broken up into two parts)
## Logs
<!--
Often it is very useful to include log fragments of the involved component.
You can get the logs via `docker logs <container name> --tail 1000`.
For example for the admin container: `docker logs mailu_admin_1 --tail 1000`
or using docker-compose `docker-compose -f /mailu/docker-compose.yml logs --tail 1000 admin`
If you can find the relevant section, please share only the parts that seem relevant. If you have any logs, please enclose them in code tags, like so:
```
Your logs here!
```
-->
not relevant
| @tusooa You can read here https://www.mailhardener.com/blog/how-to-enter-txt-values-in-google-cloud-dns why we do this intentionally.
Maybe adding a toggle which splits/joins the field (using javascript) would be a solution for everyone.
Hm... making it break everywhere in order to not break in Google Cloud DNS is a bit suboptimal (especially because those are reported as bugs there and were not closed by Google).
I'd add instructions on how to split to FAQ instead (there are apparently tools like https://www.mailhardener.com/tools/dns-record-splitter for this) and leave it contiguous the way it should be for everyone else.
As a new user, i lost an entire day troubleshooting this issue..
kubernetes user here btw
i was trying to setup the entries in a cloudflare dns manually, i was not really sure the correct way it was supposed to be inserted..
i eventually found out somewhere that i could paste the entries in a text file and upload to cloudflare and it created the correct entries...
maybe at least we could get a button to generate the text file so we can just import the entries into the dns provider?
or better yet if the correct implementation is only to solve an issue with 1 specific dns provider, don't make the solution for 1, be the solution for everybody else..
maybe put a note for that specific provider..
i also wanted to leave a note, probably should create my own issue...
in the same page the MX entry has a green mark saying its "ok"
for some time while troubleshooting i was hoping the other entries showed up with a green mark as well
I'm currently compiling a PR with interfaces changes (auto-focus of form fields, better navigation, dns validation, ...)
I'll add the non-splitted version there (show both or make toggling possible, sth like this.)
For now you can output all the keys using the config-export (which makes it easy to automate the update using a dns api):
```
docker compose exec admin flask mailu config-export -jd domain | jq -r '.domain[] | select(.dkim_publickey) | "dkim._domainkey.\(.name). 600 IN TXT \"v=DKIM1; k=rsa; p=\(.dkim_publickey)\""'
``` | 2023-10-29T10:54:47 |
|
Mailu/Mailu | 3,025 | Mailu__Mailu-3025 | [
"1524"
] | 4e351e1dd41955fd14299768957f95fbe8d675b8 | diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -10,12 +10,16 @@
import ipaddress
import hashlib
import time
-
+import secrets
+from flask_bootstrap import StaticCDN
version = os.getenv("this_version", "master")
static_url_path = "/" + version + "/static"
app = flask.Flask(__name__, static_url_path=static_url_path)
+app.secret_key = secrets.token_hex(16)
flask_bootstrap.Bootstrap(app)
+# Load our jQuery. Do not use jQuery 1.
+app.extensions['bootstrap']['cdns']['jquery'] = StaticCDN()
db = redis.StrictRedis(host='redis', port=6379, db=0)
@@ -90,12 +94,47 @@ def wizard():
def submit():
data = flask.request.form.copy()
data['uid'] = str(uuid.uuid4())
+ valid = True
+ try:
+ ipaddress.IPv4Address(data['bind4'])
+ except:
+ flask.flash('Configured IPv4 address is invalid', 'error')
+ valid = False
+ try:
+ ipaddress.IPv6Address(data['bind6'])
+ except:
+ flask.flash('Configured IPv6 address is invalid', 'error')
+ valid = False
+ try:
+ ipaddress.IPv4Network(data['subnet'])
+ except:
+ flask.flash('Configured subnet(IPv4) is invalid', 'error')
+ valid = False
+ try:
+ ipaddress.IPv6Network(data['subnet6'])
+ except:
+ flask.flash('Configured subnet(IPv6) is invalid', 'error')
+ valid = False
try:
data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
except ValueError as err:
- return "Error while generating files: " + str(err)
- db.set(data['uid'], json.dumps(data))
- return flask.redirect(flask.url_for('.setup', uid=data['uid']))
+ flask.flash('Invalid configuration: ' + str(err))
+ valid = False
+ if 'api_enabled' in data:
+ if (data['api_enabled'] == 'true'):
+ if data['api_token'] == '':
+ flask.flash('API token cannot be empty when API is enabled', 'error')
+ valid = False
+ if valid:
+ db.set(data['uid'], json.dumps(data))
+ return flask.redirect(flask.url_for('.setup', uid=data['uid']))
+ else:
+ return flask.render_template(
+ 'wizard.html',
+ flavor="compose",
+ steps=sorted(os.listdir(os.path.join(path, "templates", "steps", "compose"))),
+ subnet6=random_ipv6_subnet()
+ )
@prefix_bp.route("/setup/<uid>", methods=["GET"])
@root_bp.route("/setup/<uid>", methods=["GET"])
| [SUGG] Little verification in Mailu setup
Hi thanks for Mailu it is a good project.
I submit this suggestion because i made the error and take many time to find it.
In step 4 of Mailu Setup for Docker compose :
**Subnet of the docker network** it could be nice to verify if the last octet of the IP4 address is equal to 0 because if it is not the SMTP wont work.

Regards
| I agree, just simple bitand should work to check if IP is valid for CIDR
Hi There,
The `Mailu`-Project is currently in a bit of a bind! We are short on man-power, and we need to judge if it is possible for us to put in some work on this issue.
To help with that, we are currently trying to find out which issues are actively keeping users from using `Mailu`, which issues have someone who want to work on them β and which issues may be less important. These a less important ones could be discarded for the time being, until the project is in a more stable and regular state once again.
In order for us to better assess this, it would be helpful if you could put a **reaction on this post** (use the :smiley: icon to the top-right).
- ποΈ if you **need this** to be able to use Mailu. Ideally, youβd also be able to test this on your installation, and provide feedback β¦
- π if you find it a **nice bonus**, but no deal-breaker
- π if you want to **work on it yourself**!
We want to keep this **voting open for 2 weeks** from now, so please help out!
This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.
An easy dirty fix could be to add this explicitly to the setup website. Warning the user that the last octet must be 0 | 2023-10-29T13:02:00 |
|
Mailu/Mailu | 3,096 | Mailu__Mailu-3096 | [
"3095"
] | 2ee1c0698e1009e80c5ecb7bad1bf692a56e36a3 | diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -38,7 +38,7 @@ def __init__(self,message=_('Invalid email address.')):
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-\+]+)(\.[_a-z0-9\-\+]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$', re.IGNORECASE)
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| Validation of redirect address in UI
There are several CMSs that require the forwarding address to be specified partially in uppercase. Validation does not allow this.


```
grep MAILU_VERSION docker-compose.yml mailu.env
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}unbound:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}dovecot:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}postfix:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}oletools:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}radicale:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}fetchmail:${MAILU_VERSION:-2.0}
docker-compose.yml: image: ${DOCKER_ORG:-ghcr.io/mailu}/${DOCKER_PREFIX:-}webmail:${MAILU_VERSION:-2.0}
```
| 2023-12-19T17:27:32 |
||
Mailu/Mailu | 3,100 | Mailu__Mailu-3100 | [
"3094"
] | 774dfa4da8212a8caed7cdb45db079560cae43e0 | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -37,7 +37,7 @@ def nginx_authentication():
is_valid_user = False
username = response.headers.get('Auth-User', None)
if response.headers.get("Auth-User-Exists") == "True":
- if not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):
+ if not is_from_webmail and not is_app_token and utils.limiter.should_rate_limit_user(username, client_ip):
# FIXME could be done before handle_authentication()
status, code = nginx.get_status(flask.request.headers['Auth-Protocol'], 'ratelimit')
response = flask.Response()
| Webmail is not exempt from rate limiting
## Environment & Version
- `docker compose version`: _Docker Compose version 2.23.3_
- Version: mailu `2.0.34`
## Description
Trying to open my webmail (roundcube) the browser just showed an error due to _too many redirects_ - which were to `sso.php` of the webmailer.
Debugging this I found the following to be the reason:
> front-1 | [info] 15#15: *153382 client login failed: "Temporary authentication failure (rate-limit)" while in http auth state, client: **172.29.0.3**, server: 0.0.0.0:10143, login: "[email protected]"
Where `172.29.0.3` is the IP of the webmail container.
As far as I could debug this everything else was working fine, `sso.php` could correctly get valid credentials provided by `front` via HTTP headers but trying to use them would fail since the webmail container was rate limited. The failed login would the redirect again to `sso.php` - in a loop...
## Replication Steps
Unfortunately I have no idea how the webmail container's IP could end up on the rate limited list...
The webmail container should only ever try to login with credentials provided by `front` via HTTP headers - which then should always be valid
## Observed behaviour
Webmailer was blocked by rate limiting, preventing it from successfully authenticate, causing its login in to fail and damning the browser into an infinite redirection loop.
## Expected behaviour
The webmailer should not be blocked by rate limiting since the credentials are passed from an already valid login via SSO anyway.
## Possible solutions
1. prevent the webmailer from hitting the rate limits: this requires some more debugging,since I don't know how it could end up rate limited in the first place since every login it tries should be successful...
2. exempt the webmail container from rate limits: this is the workaround I used now by adapting my `docker-compose.yml` config to give the network used by the webmailer a known subnet and exempting it from rate limits:
~~~diff
--- a/mailu.env
+++ b/.mailu.env
@@ -38,6 +38,10 @@ AUTH_RATELIMIT_IP=60/hour
# Authentication rate limit per user (regardless of the source-IP)
AUTH_RATELIMIT_USER=100/day
+# exempt webmail container from rate limiting
+WEBMAIL_SUBNET=172.29.0.0/24
+AUTH_RATELIMIT_EXEMPTION=$WEBMAIL_SUBNET
+
# Opt-out of statistics, replace with "True" to opt out
DISABLE_STATISTICS=False
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -184,3 +184,7 @@ networks:
webmail:
driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: "$WEBMAIL_SUBNET"
~~~
| The rate limiter doesn't work the way you think it does.
Webmail is excluded from IP rate limits but not from account rate limits. ``AUTH_RATELIMIT_USER`` is what you are running into.
> The rate limiter doesn't work the way you think it does.
@nextgens: I'm open to be corrected here. This was only my limited understanding so far.
But the resulting behavior (a redirect loop) was unexpected for me nevertheless and really feels like a bug. Especially since I did not find any other way to recover from it.
Do you have any other suggestions then how to avoid running into this situation again?
Find out what has caused [email protected] to be rate-limited; address that.
AUTH_RATELIMIT_USER defaults to 50/day ... that means that something somewhere has attempted at least 50 different passwords against that account.
I agree that feedback on what is going on could be improved... some work has been done on it in master already (the admin container now logs exactly what is going on authentication-wise).
> Find out what has caused [email protected] to be rate-limited; address that.
>
> `AUTH_RATELIMIT_USER` defaults to 50/day ... that means that something somewhere has attempted at least 50 different passwords against that account.
Ah, I see, so it was some other client that hit the rate limit and caused [email protected] to be blocked.
I should be able to find hints on the culprit in the log output of the containers, right?
But wouldn't it still make sense, to exempt the webmailer container from rate limiting anyway, since it has no login form but uses SSO (or at least roundcube in my case)?
Or is that what's already done in master?
> > Find out what has caused [[email protected]](mailto:[email protected]) to be rate-limited; address that.
> > `AUTH_RATELIMIT_USER` defaults to 50/day ... that means that something somewhere has attempted at least 50 different passwords against that account.
>
> Ah, I see, so it was some other client that hit the rate limit and caused [[email protected]](mailto:[email protected]) to be blocked. I should be able to find hints on the culprit in the log output of the containers, right?
>
Yes. It may not be trivial since there is no information about "distinctness" of passwords attempted in the logs... that's what has been fixed in master.
> But wouldn't it still make sense, to exempt the webmailer container from rate limiting anyway, since it has no login form but uses SSO (or at least roundcube in my case)? Or is that what's already done in master?
Hmm, the check is already done in SSO so if that was a new session it means your browser had the right cookies to bypass the rate-limit. Yeah I guess we can fix that; I'll send a PR. | 2023-12-21T11:39:11 |
|
Mailu/Mailu | 3,188 | Mailu__Mailu-3188 | [
"3187"
] | a7cf68163a214a3c006b526fb4595c8cb3b8d6cd | diff --git a/core/nginx/config.py b/core/nginx/config.py
--- a/core/nginx/config.py
+++ b/core/nginx/config.py
@@ -5,6 +5,60 @@
import sys
from socrate import system, conf
+from cryptography import x509
+from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
+from cryptography.x509.verification import PolicyBuilder, Store, DNSName
+from cryptography.x509.oid import NameOID
+import hashlib
+
+ISRG_ROOT_X1 = x509.load_pem_x509_certificate(b'''-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+''')
+ISRG_ROOT_X2 = x509.load_pem_x509_certificate(b'''-----BEGIN CERTIFICATE-----
+MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw
+CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg
+R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00
+MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT
+ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW
++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9
+ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI
+zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW
+tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1
+/q4AaOeMSQ+2b1tbFfLn
+-----END CERTIFICATE-----
+''')
+
args = system.set_env()
log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
@@ -29,21 +83,47 @@
"notls": None
}[args["TLS_FLAVOR"]]
-def format_for_nginx(fullchain, output):
+def format_for_nginx(fullchain, output, strip_CA=args.get('LETSENCRYPT_SHORTCHAIN')):
""" We may want to strip ISRG Root X1 out """
if not os.path.exists(fullchain):
return
- split = '-----END CERTIFICATE-----\n'
- with open(fullchain, 'r') as pem:
- certs = [f'{cert}{split}' for cert in pem.read().split(split) if cert]
- if len(certs)>2 and args.get('LETSENCRYPT_SHORTCHAIN'):
- del certs[-1]
- with open(output, 'w') as pem:
- pem.write(''.join(certs))
+ chain=[]
+ with open(fullchain, 'rb') as f:
+ chain = x509.load_pem_x509_certificates(f.read())
+ builder = PolicyBuilder().store(Store([ISRG_ROOT_X1, ISRG_ROOT_X2]))
+ verifier = builder.build_server_verifier(DNSName(chain[0].subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value))
+ try:
+ valid_chain = verifier.verify(chain[0], chain[1:])
+ except Exception as e:
+ log.error(e)
+ valid_chain = chain
+ log.info(f'The certificate chain looks as follows for {fullchain}:')
+ indent = ' '
+ has_found_PIN = False
+ for cert in valid_chain:
+ pubkey_der = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
+ digest = hashlib.sha256(pubkey_der).hexdigest()
+ log.info(f'{indent}{cert.subject.rfc4514_string()} {digest}')
+ indent += ' '
+ if digest == '0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3': # ISRG Root X1
+ log.info('ISRG X1 PIN FOUND!')
+ has_found_PIN = True
+ elif digest == '762195c225586ee6c0237456e2107dc54f1efc21f61a792ebd515913cce68332': # ISRG Root X2
+ log.info('ISRG X2 PIN FOUND!')
+ has_found_PIN = True
+ if not has_found_PIN:
+ log.error('Neither ISRG X1 nor ISRG X2 have been found in the certificate chain. Please check your DANE records.')
+ with open(output, 'wt') as f:
+ for cert in valid_chain:
+ if strip_CA and (cert.subject.rfc4514_string() in ['CN=ISRG Root X1,O=Internet Security Research Group,C=US', 'CN=ISRG Root X2,O=Internet Security Research Group,C=US']):
+ continue
+ f.write(f'{cert.public_bytes(encoding=Encoding.PEM).decode("ascii").strip()}\n')
if args['TLS_FLAVOR'] in ['letsencrypt', 'mail-letsencrypt']:
format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/nginx-chain.pem')
+ format_for_nginx('/certs/letsencrypt/live/mailu/fullchain.pem', '/certs/letsencrypt/live/mailu/DANE-chain.pem', False)
format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/nginx-chain.pem')
+ format_for_nginx('/certs/letsencrypt/live/mailu-ecdsa/fullchain.pem', '/certs/letsencrypt/live/mailu-ecdsa/DANE-chain.pem', False)
if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
print("Missing cert or key file, disabling TLS")
@@ -55,4 +135,4 @@ def format_for_nginx(fullchain, output):
conf.jinja("/conf/nginx.conf", args, "/etc/nginx/nginx.conf")
conf.jinja("/dovecot_conf/login.lua", args, "/etc/dovecot/login.lua")
conf.jinja("/dovecot_conf/proxy.conf", args, "/etc/dovecot/proxy.conf")
-os.system("killall -HUP nginx dovecot")
+os.system("killall -q -HUP nginx dovecot")
| ISRG_X1 not included in fullchain.pem (--> DANE validation failed)
Hi folks,
and thanks for this great software.
_Job-so._
## Environment & Version
- docker-compose
- Mailu Version: `2.0`
## Description
- Same issue as #2138
- As ISRG_X1 is not present in /certs/letsencrypt/live/mailu-ecdsa/fullchain.pem, DANE validation failed when TLSA DNS record is `2 1 1 0b9fa5a59eed715c26c1020c711b4f6ec42d58b0015e14337a39dad301c5afc3`
## Replication Steps
```
$ echo | openssl s_client -connect test.mailu.io:25 -starttls smtp -showcerts
CONNECTED(00000003)
depth=2 C = US, O = Internet Security Research Group, CN = ISRG Root X1
verify return:1
depth=1 C = US, O = Let's Encrypt, CN = R3
verify return:1
depth=0 CN = mailu.io
verify return:1
---
Certificate chain
0 s:CN = mailu.io
i:C = US, O = Let's Encrypt, CN = R3
a:PKEY: rsaEncryption, 4096 (bit); sigalg: RSA-SHA256
v:NotBefore: Feb 19 12:43:20 2024 GMT; NotAfter: May 19 12:43:19 2024 GMT
-----BEGIN CERTIFICATE-----
MIIF/DCCBOSgAwIBAgISA/rJ9CbQM3HoUjwXuZKxe47TMA0GCSqGSIb3DQEBCwUA
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
EwJSMzAeFw0yNDAyMTkxMjQzMjBaFw0yNDA1MTkxMjQzMTlaMBMxETAPBgNVBAMT
CG1haWx1LmlvMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4XEW8iHD
8G3Uj6vB2j5/VgH3PVlAI4vy2a89HBoq48SvTWahxjLDrtQr2fx0eh2AeiZJ1umM
4ezVy1KuN6mPrYOPgFb7PkpYedoezfBSo2IhcKxSUxB11BM8Y2BzU9P+HnMXgtXz
odKWwfLVPlMdLFjUuN9f5ycW7x/poHB5Jqxr0hfMD1uyhFidsfV/hTR8Vee6sdEt
TdOcOt2EFQjuVL2031eRrErZmCvTaDvm1ghPNhBz2Ej0F22xc7RPhx0cXAyQjxcr
nsX44G5/eu8C4bOwpMbL7JIFam4WKcMcZj0TinKLHKJLXYXzC3rQPuRhq6enqNQq
U1449pPjWi6bzcGgSE9Hy31ua7szSXvM0wPQ8wmF3G6w2eOfFixW1OlgHpeMHUYX
D8EypFkoXZnmQYcQSQDcFtuCF4sM7Hg1WXmViGKwBfs3VnebQOhIokoG8UYyIKHU
zsL4tpyvU5wmtdLOheaOUIa/Fn/zoXUei1jRxgZ4HZV0Db+6cNLBj6mgwUSmn5sF
YKLMkCAI+h5r4Zq3xa9j58xX8D1Aan1tMvcYOw9/6HtwLpsAnAOTXa8vbsKTApW0
14iIpaiKiSPBnmpVYv5rnwYE8tz7A8jK1okU7s3AKpPxeTdkhMGoTvl7faXUb9+5
sVodR5AAawFUivNrk89jEYiawf9V+mCZGcUCAwEAAaOCAikwggIlMA4GA1UdDwEB
/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/
BAIwADAdBgNVHQ4EFgQU2ueOB46OgwMwhYgrQ+skLTQIxI0wHwYDVR0jBBgwFoAU
FC6zF7dYVsuuUAlA5h+vnYsUwsYwVQYIKwYBBQUHAQEESTBHMCEGCCsGAQUFBzAB
hhVodHRwOi8vcjMuby5sZW5jci5vcmcwIgYIKwYBBQUHMAKGFmh0dHA6Ly9yMy5p
LmxlbmNyLm9yZy8wMgYDVR0RBCswKYIIbWFpbHUuaW+CDnNldHVwLm1haWx1Lmlv
gg10ZXN0Lm1haWx1LmlvMBMGA1UdIAQMMAowCAYGZ4EMAQIBMIIBBAYKKwYBBAHW
eQIEAgSB9QSB8gDwAHYASLDja9qmRzQP5WoC+p0w6xxSActW3SyB2bu/qznYhHMA
AAGNwZtckQAABAMARzBFAiEAhA0n43Pk/VqxL9XKF3Vob1bExuyBm4HORBPIeV9y
SNwCIDOe93jpDgUPOUFIs4wHUxFPOLzDxp+Ihlc6Zrk8YwroAHYAouK/1h7eLy8H
oNZObTen3GVDsMa1LqLat4r4mm31F9gAAAGNwZtclAAABAMARzBFAiEAqp62nMR4
9lcdOfQYKyPuehqX9uYLRD0rjf39okQSdmsCICs7IB+sCMgsRVlMASg2d2XiC7Q4
3acid1F68YDn71UWMA0GCSqGSIb3DQEBCwUAA4IBAQBgZrlx36RaEXBf/MynUR9Z
Viaxd0OYznENBjdVJh44i3YAHESaySs40j39NpK3DsN0yiB7fLBkuyflu23hxdjf
grRleBdg47jZPzv8ElM9yY/cAHrQje4PM4+LNC7zz0G9KaBPg5ze+kxMqmxLA6fW
yRc9niOzm0xmsRyK4Lyt2xYhEIodNESiMZiVjUnIfMeZeDtwOsDt5XKWddmsng/n
xGImgilsxeyJQ8FFG2yUKka3H3+wic5RAsI6eFJHZwN1aA5o9qen4doMWbOyx8f9
c19qos53h0aM+LfBdF85G+sdlSkZMqSySsBva+JfzDjzzJv77uKCs9QK2owVtNjc
-----END CERTIFICATE-----
1 s:C = US, O = Let's Encrypt, CN = R3
i:C = US, O = Internet Security Research Group, CN = ISRG Root X1
a:PKEY: rsaEncryption, 2048 (bit); sigalg: RSA-SHA256
v:NotBefore: Sep 4 00:00:00 2020 GMT; NotAfter: Sep 15 16:00:00 2025 GMT
-----BEGIN CERTIFICATE-----
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
nLRbwHOoq7hHwg==
-----END CERTIFICATE-----
---
Server certificate
subject=CN = mailu.io
issuer=C = US, O = Let's Encrypt, CN = R3
---
No client certificate CA names sent
Peer signing digest: SHA256
Peer signature type: RSA-PSS
Server Temp Key: X25519, 253 bits
---
SSL handshake has read 3837 bytes and written 428 bytes
Verification: OK
---
New, TLSv1.3, Cipher is TLS_AES_256_GCM_SHA384
Server public key is 4096 bit
Secure Renegotiation IS NOT supported
Compression: NONE
Expansion: NONE
No ALPN negotiated
Early data was not sent
Verify return code: 0 (ok)
---
250 STARTTLS
DONE
```
## Observed behaviour
Only 2 certificates are returned :
1. CN = mailu.io
2. C = US, O = Let's Encrypt, CN = R3
## Expected behaviour
Expect the full chain, including root (3 certificates)
1. CN = mailu.io
2. C = US, O = Let's Encrypt, CN = R3
3. C = US, O = Internet Security Research Group, CN = ISRG Root X1
## Suggestion ?
- https://github.com/Mailu/Mailu/commit/0816cb9497f31ca4ee71aebdee97b87e71870cc8 removed `add_DANE_pin` function, maybe should we restore this function ?
- BTW `/certs/letsencrypt/live/mailu/nginx-chain.pem` seems to not be used anywhere ? , at least nginx.conf refers to `fullchain`, not `nginx-chain` files
| 2024-03-10T11:07:13 |
||
webkom/lego | 24 | webkom__lego-24 | [
"23"
] | 414ed6af1d422ae86ec2c6b0ecf9afe1a033c371 | diff --git a/lego/settings/base.py b/lego/settings/base.py
--- a/lego/settings/base.py
+++ b/lego/settings/base.py
@@ -6,6 +6,8 @@
TESTING = 'test' in sys.argv # Check if manage.py test has been run
+SHELL_PLUS = "ipython"
+
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
@@ -20,6 +22,8 @@
'django.contrib.messages',
'django.contrib.staticfiles',
+ 'django_extensions',
+
'oauth2_provider',
'rest_framework',
| Add shell pluss and django extensions
| :+1:
| 2014-10-31T13:47:04 |
|
webkom/lego | 894 | webkom__lego-894 | [
"890"
] | 75f2011cbb996b365135cd888b9d2e0b38eceb06 | diff --git a/lego/apps/events/models.py b/lego/apps/events/models.py
--- a/lego/apps/events/models.py
+++ b/lego/apps/events/models.py
@@ -74,7 +74,7 @@ def save(self, *args, **kwargs):
pool.save(update_fields=['counter'])
return super().save(*args, **kwargs)
- def admin_register(self, user, pool, admin_reason, feedback=''):
+ def admin_register(self, user, admin_reason, pool=None, feedback=''):
"""
Used to force registration for a user, even if the event is full
or if the user isn't allowed to register.
@@ -84,25 +84,25 @@ def admin_register(self, user, pool, admin_reason, feedback=''):
:param feedback: Feedback to organizers
:return: The registration
"""
- if self.pools.filter(id=pool.id).exists():
- with transaction.atomic():
- reg = self.registrations.update_or_create(
- event=self,
- user=user,
- defaults={'pool': pool,
- 'feedback': feedback,
- 'registration_date': timezone.now(),
- 'unregistration_date': None,
- 'status': constants.SUCCESS_REGISTER,
- 'admin_reason': admin_reason}
- )[0]
+ if pool and not self.pools.filter(id=pool.id).exists():
+ raise NoSuchPool()
+ with transaction.atomic():
+ reg = self.registrations.update_or_create(
+ event=self,
+ user=user,
+ defaults={'pool': pool,
+ 'feedback': feedback,
+ 'registration_date': timezone.now(),
+ 'unregistration_date': None,
+ 'status': constants.SUCCESS_REGISTER,
+ 'admin_reason': admin_reason}
+ )[0]
+ if pool:
locked_pool = Pool.objects.select_for_update().get(pk=pool.id)
locked_pool.increment()
- get_handler(Registration).handle_admin_registration(reg)
- return reg
- else:
- raise NoSuchPool()
+ get_handler(Registration).handle_admin_registration(reg)
+ return reg
def get_absolute_url(self):
return f'{settings.FRONTEND_URL}/events/{self.id}/'
diff --git a/lego/apps/events/serializers/registrations.py b/lego/apps/events/serializers/registrations.py
--- a/lego/apps/events/serializers/registrations.py
+++ b/lego/apps/events/serializers/registrations.py
@@ -13,7 +13,7 @@
class AdminRegistrationCreateAndUpdateSerializer(serializers.Serializer):
user = PrimaryKeyRelatedFieldNoPKOpt(queryset=User.objects.all())
- pool = PrimaryKeyRelatedFieldNoPKOpt(queryset=Pool.objects.all())
+ pool = PrimaryKeyRelatedFieldNoPKOpt(queryset=Pool.objects.all(), required=False)
feedback = serializers.CharField(required=False)
admin_reason = serializers.CharField(required=True)
| diff --git a/lego/apps/events/tests/test_admin_registrations.py b/lego/apps/events/tests/test_admin_registrations.py
--- a/lego/apps/events/tests/test_admin_registrations.py
+++ b/lego/apps/events/tests/test_admin_registrations.py
@@ -28,7 +28,7 @@ def test_admin_registration(self):
no_of_regs_before = event.number_of_registrations
pool_no_of_regs_before = pool.registrations.count()
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, no_of_regs_before + 1)
self.assertEqual(pool.registrations.count(), pool_no_of_regs_before + 1)
@@ -44,7 +44,7 @@ def test_ar_with_wrong_pool(self):
pool_no_of_regs_before = wrong_pool.registrations.count()
with self.assertRaises(ValueError):
- event_one.admin_register(user, wrong_pool, admin_reason='test')
+ event_one.admin_register(user, admin_reason='test', pool=wrong_pool)
self.assertEqual(event_one.number_of_registrations, e1_no_of_regs_before)
self.assertEqual(event_two.number_of_registrations, e2_no_of_regs_before)
self.assertEqual(wrong_pool.registrations.count(), pool_no_of_regs_before)
@@ -59,7 +59,7 @@ def test_ar_without_permissions_for_user(self):
e1_no_of_regs_before = event.number_of_registrations
pool_no_of_regs_before = pool.registrations.count()
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, e1_no_of_regs_before+1)
self.assertEqual(pool.registrations.count(), pool_no_of_regs_before+1)
@@ -73,7 +73,7 @@ def test_ar_after_merge(self):
e1_no_of_regs_before = event.number_of_registrations
pool_no_of_regs_before = pool.registrations.count()
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, e1_no_of_regs_before+1)
self.assertEqual(pool.registrations.count(), pool_no_of_regs_before+1)
@@ -91,7 +91,7 @@ def test_ar_to_full_pool(self):
e1_no_of_regs_before = event.number_of_registrations
pool_no_of_regs_before = pool.registrations.count()
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, e1_no_of_regs_before+1)
self.assertEqual(pool.registrations.count(), pool_no_of_regs_before+1)
@@ -109,7 +109,7 @@ def test_ar_to_full_event(self):
e1_no_of_regs_before = event.number_of_registrations
pool_no_of_regs_before = pool.registrations.count()
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, e1_no_of_regs_before+1)
self.assertEqual(pool.registrations.count(), pool_no_of_regs_before+1)
@@ -122,6 +122,17 @@ def test_ar_twice(self):
e1_no_of_regs_before = event.number_of_registrations
- event.admin_register(user, pool, admin_reason='test')
- event.admin_register(user, pool, admin_reason='test')
+ event.admin_register(user, admin_reason='test', pool=pool)
+ event.admin_register(user, admin_reason='test', pool=pool)
self.assertEqual(event.number_of_registrations, e1_no_of_regs_before+1)
+
+ def test_ar_without_pool(self):
+ """Test that admin registration without pool puts the registration in the waiting list"""
+ event = Event.objects.get(title='POOLS_NO_REGISTRATIONS')
+ user = get_dummy_users(1)[0]
+ AbakusGroup.objects.get(name='Abakus').add_user(user)
+
+ waiting_regs_before = event.waiting_registrations.count()
+
+ event.admin_register(user, admin_reason='test')
+ self.assertEqual(event.waiting_registrations.count(), waiting_regs_before+1)
diff --git a/lego/apps/events/tests/test_events_api.py b/lego/apps/events/tests/test_events_api.py
--- a/lego/apps/events/tests/test_events_api.py
+++ b/lego/apps/events/tests/test_events_api.py
@@ -654,6 +654,19 @@ def test_without_admin_reason(self):
self.assertEqual(registration_response.status_code, 400)
+ def test_ar_to_waiting_list(self):
+ AbakusGroup.objects.get(name='Webkom').add_user(self.request_user)
+ self.client.force_authenticate(self.request_user)
+ self.assertEqual(self.event.waiting_registrations.count(), 0)
+
+ registration_response = self.client.post(
+ f'{_get_registrations_list_url(self.event.id)}admin_register/',
+ {'user': self.user.id, 'admin_reason': 'test'}
+ )
+
+ self.assertEqual(registration_response.status_code, 201)
+ self.assertEqual(self.event.waiting_registrations.count(), 1)
+
@skipIf(not stripe.api_key, 'No API Key set. Set STRIPE_TEST_KEY in ENV to run test.')
class StripePaymentTestCase(APITestCase):
diff --git a/lego/apps/events/tests/test_registrations.py b/lego/apps/events/tests/test_registrations.py
--- a/lego/apps/events/tests/test_registrations.py
+++ b/lego/apps/events/tests/test_registrations.py
@@ -473,10 +473,10 @@ def test_unregistering_and_bumping_post_merge(self):
for user in abakus_users:
AbakusGroup.objects.get(name='Abakus').add_user(user)
- event.admin_register(user, pool_one, admin_reason='test')
+ event.admin_register(user, pool=pool_one, admin_reason='test')
for user in webkom_users:
AbakusGroup.objects.get(name='Webkom').add_user(user)
- event.admin_register(user, pool_two, admin_reason='test')
+ event.admin_register(user, pool=pool_two, admin_reason='test')
AbakusGroup.objects.get(name='Abakus').add_user(users[5])
registration = Registration.objects.get_or_create(event=event, user=users[5])[0]
| Admin registration to waiting list
I think we should support admin register to waiting list
| 2017-10-24T21:02:57 |
|
webkom/lego | 903 | webkom__lego-903 | [
"897"
] | 75f2011cbb996b365135cd888b9d2e0b38eceb06 | diff --git a/lego/apps/companies/serializers.py b/lego/apps/companies/serializers.py
--- a/lego/apps/companies/serializers.py
+++ b/lego/apps/companies/serializers.py
@@ -4,6 +4,7 @@
from lego.apps.comments.serializers import CommentSerializer
from lego.apps.companies.models import (Company, CompanyContact, CompanyFile, CompanyInterest,
Semester, SemesterStatus)
+from lego.apps.feed.registry import get_handler
from lego.apps.files.fields import FileField, ImageField
from lego.apps.users.fields import PublicUserField
from lego.apps.users.models import User
@@ -126,6 +127,15 @@ class Meta:
fields = ('id', 'company_name', 'contact_person', 'mail', 'semesters', 'events',
'other_offers', 'comment')
+ def create(self, validated_data):
+ semesters = validated_data.pop('semesters')
+ company_interest = CompanyInterest.objects.create(**validated_data)
+ company_interest.semesters.add(*semesters)
+ company_interest.save()
+ get_handler(CompanyInterest).handle_interest(company_interest)
+
+ return company_interest
+
class CompanyInterestListSerializer(serializers.ModelSerializer):
class Meta:
diff --git a/lego/apps/feed/feed_handlers/company_interest_handler.py b/lego/apps/feed/feed_handlers/company_interest_handler.py
--- a/lego/apps/feed/feed_handlers/company_interest_handler.py
+++ b/lego/apps/feed/feed_handlers/company_interest_handler.py
@@ -14,7 +14,7 @@ class CompanyInterestHandler(BaseHandler):
model = CompanyInterest
manager = feed_manager
- def handle_create(self, company_interest):
+ def handle_interest(self, company_interest):
activity = Activity(
actor=company_interest,
@@ -38,6 +38,9 @@ def handle_create(self, company_interest):
)
notification.notify()
+ def handle_create(self, company_interest):
+ pass
+
def handle_update(self, company_interest):
pass
| Semesters are missing from interest form e-mails
It seems that the signal to the feed handler is triggered before the semesters are stored on the model? The semesters show up when viewing the interest later, but are not present when the email is generated. Could this have happened after the changes to semesters, @odinuge?
A quick fix would be to send the email during creation, instead of in the feed handler.
| The reason is that many-to-many relations (eg. semesters in companyInterest) are created after the `post_save` hook that is use to send notifications via the feed. It shouldn't be any problem to use the `m2m-changed` signal instead (fired when m2m-relations are saved/changed) - but it would require some changes in the general feed handling flow.
https://docs.djangoproject.com/en/dev/ref/signals/#m2m-changed
Why did this work previously then? :thinking: | 2017-10-26T17:23:11 |
|
webkom/lego | 1,069 | webkom__lego-1069 | [
"1040"
] | 586bbab04278640cc79abc05bd8e06237c514338 | diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py
--- a/lego/apps/feed/feed_handlers/penalty_handler.py
+++ b/lego/apps/feed/feed_handlers/penalty_handler.py
@@ -18,7 +18,6 @@ def get_activity(self, penalty):
time=penalty.created_at, extra_context={
'reason': penalty.reason,
'weight': penalty.weight,
- 'total': penalty.user.number_of_penalties()
}
)
diff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py
--- a/lego/apps/users/notifications.py
+++ b/lego/apps/users/notifications.py
@@ -16,7 +16,6 @@ def generate_mail(self):
'weight': penalty.weight,
'event': penalty.source_event.title,
'reason': penalty.reason,
- 'total': self.user.number_of_penalties()
},
subject=f'Du har fΓ₯tt en ny prikk',
plain_template='users/email/penalty.txt',
| diff --git a/lego/apps/feed/tests/test_penalty_handler.py b/lego/apps/feed/tests/test_penalty_handler.py
--- a/lego/apps/feed/tests/test_penalty_handler.py
+++ b/lego/apps/feed/tests/test_penalty_handler.py
@@ -36,7 +36,5 @@ def test_extra_content(self):
activity = self.all_activities(NotificationFeed(self.user.id))[0]
self.assertIn('reason', activity.extra_context)
self.assertIn('weight', activity.extra_context)
- self.assertIn('total', activity.extra_context)
self.assertEqual(activity.extra_context['reason'], self.penalty.reason)
self.assertEqual(activity.extra_context['weight'], self.penalty.weight)
- self.assertEqual(activity.extra_context['total'], self.user.number_of_penalties())
| Wrong penalty count in email
The counter in the penalty email is still wrong:

| Unable to reproduce locally, but happens consistently in prod :thinking: | 2018-02-13T19:51:15 |
webkom/lego | 1,092 | webkom__lego-1092 | [
"908"
] | c8fb115095dcb648fb9e7f7fec9c1f52a5d86d31 | diff --git a/lego/apps/meetings/serializers.py b/lego/apps/meetings/serializers.py
--- a/lego/apps/meetings/serializers.py
+++ b/lego/apps/meetings/serializers.py
@@ -45,7 +45,7 @@ class MeetingBulkInvite(serializers.Serializer):
)
-class MeetingSerializer(BasisModelSerializer):
+class MeetingDetailSerializer(BasisModelSerializer):
invitations = MeetingInvitationSerializer(many=True, read_only=True)
report = ContentSerializerField()
report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)
@@ -63,3 +63,14 @@ def create(self, validated_data):
owner = validated_data['current_user']
meeting.invite_user(owner, owner)
return meeting
+
+
+class MeetingListSerializer(BasisModelSerializer):
+ report_author = PublicUserField(queryset=User.objects.all(), allow_null=True, required=False)
+ created_by = PublicUserField(read_only=True)
+
+ class Meta:
+ model = Meeting
+ fields = (
+ 'id', 'created_by', 'title', 'location', 'start_time', 'end_time', 'report_author'
+ )
diff --git a/lego/apps/meetings/views.py b/lego/apps/meetings/views.py
--- a/lego/apps/meetings/views.py
+++ b/lego/apps/meetings/views.py
@@ -6,8 +6,8 @@
from lego.apps.meetings.filters import MeetingFilterSet
from lego.apps.meetings.models import Meeting, MeetingInvitation
from lego.apps.meetings.serializers import (
- MeetingBulkInvite, MeetingGroupInvite, MeetingInvitationSerializer,
- MeetingInvitationUpdateSerializer, MeetingSerializer, MeetingUserInvite
+ MeetingBulkInvite, MeetingDetailSerializer, MeetingGroupInvite, MeetingInvitationSerializer,
+ MeetingInvitationUpdateSerializer, MeetingListSerializer, MeetingUserInvite
)
from lego.apps.permissions.api.views import AllowedPermissionsMixin
from lego.apps.permissions.utils import get_permission_handler
@@ -16,7 +16,7 @@
class MeetingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):
filter_class = MeetingFilterSet
- serializer_class = MeetingSerializer
+ serializer_class = MeetingDetailSerializer
def get_queryset(self):
permission_handler = get_permission_handler(Meeting)
@@ -30,6 +30,11 @@ def get_ordering(self):
return ordering
return 'start_time'
+ def get_serializer_class(self):
+ if self.action == 'list':
+ return MeetingListSerializer
+ return super().get_serializer_class()
+
@decorators.detail_route(methods=['POST'], serializer_class=MeetingUserInvite)
def invite_user(self, request, *args, **kwargs):
meeting = self.get_object()
diff --git a/lego/apps/notifications/serializers.py b/lego/apps/notifications/serializers.py
--- a/lego/apps/notifications/serializers.py
+++ b/lego/apps/notifications/serializers.py
@@ -1,7 +1,7 @@
from rest_framework import serializers
from lego.apps.events.serializers.events import EventReadSerializer
-from lego.apps.meetings.serializers import MeetingSerializer
+from lego.apps.meetings.serializers import MeetingDetailSerializer
from lego.apps.users.serializers.abakus_groups import PublicAbakusGroupSerializer
from lego.apps.users.serializers.users import PublicUserSerializer
from lego.utils.serializers import BasisModelSerializer
@@ -31,7 +31,7 @@ class AnnouncementListSerializer(BasisModelSerializer):
users = PublicUserSerializer(many=True, read_only=True)
groups = PublicAbakusGroupSerializer(many=True, read_only=True)
events = EventReadSerializer(many=True, read_only=True)
- meetings = MeetingSerializer(many=True, read_only=True)
+ meetings = MeetingDetailSerializer(many=True, read_only=True)
class Meta:
model = Announcement
| Meetings contains the report property at the list endpoint
| 2018-02-19T20:45:37 |
||
webkom/lego | 1,113 | webkom__lego-1113 | [
"981"
] | 397f6a7118a5441747c6b582292bc504d5fc427d | diff --git a/lego/apps/events/tasks.py b/lego/apps/events/tasks.py
--- a/lego/apps/events/tasks.py
+++ b/lego/apps/events/tasks.py
@@ -127,8 +127,9 @@ def async_payment(self, registration_id, token, logger_context=None):
amount=event.get_price(self.registration.user), currency='NOK', source=token,
description=event.slug, metadata={
'EVENT_ID': event.id,
+ 'USER_ID': self.registration.user.id,
'USER': self.registration.user.full_name,
- 'EMAIL': self.registration.user.email
+ 'EMAIL': self.registration.user.email,
}
)
log.info('stripe_payment_success', registration_id=self.registration.id)
| Add user id to metadata related to all data/actions in stripe
| 2018-02-23T17:09:10 |
||
webkom/lego | 1,233 | webkom__lego-1233 | [
"913"
] | f6c0a9e5759017ef19638a37419914b0fb70eafd | diff --git a/lego/apps/joblistings/filters.py b/lego/apps/joblistings/filters.py
new file mode 100644
--- /dev/null
+++ b/lego/apps/joblistings/filters.py
@@ -0,0 +1,9 @@
+from django_filters import FilterSet
+
+from lego.apps.joblistings.models import Joblisting
+
+
+class JoblistingFilterSet(FilterSet):
+ class Meta:
+ model = Joblisting
+ fields = ('company', )
diff --git a/lego/apps/joblistings/views.py b/lego/apps/joblistings/views.py
--- a/lego/apps/joblistings/views.py
+++ b/lego/apps/joblistings/views.py
@@ -1,6 +1,7 @@
from django.utils import timezone
from rest_framework import viewsets
+from lego.apps.joblistings.filters import JoblistingFilterSet
from lego.apps.joblistings.models import Joblisting
from lego.apps.joblistings.serializer import (
JoblistingCreateAndUpdateSerializer, JoblistingDetailedSerializer, JoblistingSerializer
@@ -10,6 +11,7 @@
class JoblistingViewSet(AllowedPermissionsMixin, viewsets.ModelViewSet):
pagination_class = None
+ filter_class = JoblistingFilterSet
def get_serializer_class(self):
if self.action in ['create', 'update', 'partial_update']:
| Create filter by company in joblistings
Currently `https://apiserver/api/v1/joblistings/?company=id` returns all active joblistings
| 2018-06-14T20:21:49 |
||
webkom/lego | 1,279 | webkom__lego-1279 | [
"1275"
] | d47da77683fc01ae6ec59f42bc7ebc50c23e871c | diff --git a/lego/apps/email/fields.py b/lego/apps/email/fields.py
--- a/lego/apps/email/fields.py
+++ b/lego/apps/email/fields.py
@@ -20,6 +20,7 @@ def to_internal_value(self, data):
Create email if not exists.
"""
try:
+ data = data.lower()
email_address, _ = self.get_queryset().get_or_create(pk=data)
return email_address
except ObjectDoesNotExist:
diff --git a/lego/apps/email/validators.py b/lego/apps/email/validators.py
--- a/lego/apps/email/validators.py
+++ b/lego/apps/email/validators.py
@@ -5,6 +5,8 @@
def validate_email_address(email_address):
+ if email_address.email != email_address.email.lower():
+ raise ValidationError('Email is not lowercased')
if email_address.is_assigned():
raise ValidationError('The address is already assigned')
| diff --git a/lego/apps/email/tests/test_views.py b/lego/apps/email/tests/test_views.py
--- a/lego/apps/email/tests/test_views.py
+++ b/lego/apps/email/tests/test_views.py
@@ -146,6 +146,19 @@ def test_set_address_on_new_user(self):
)
self.assertEquals(status.HTTP_201_CREATED, response.status_code)
+ def test_set_address_on_capitalized_internal_email(self):
+ """Set an address that is capitalized to make sure it is lowercased in input sanitation"""
+ response = self.client.post(
+ self.url, {
+ 'user': 2,
+ 'internal_email': 'TestEmail123',
+ 'internal_email_enabled': True,
+ }
+ )
+ self.assertEquals(status.HTTP_201_CREATED, response.status_code)
+ self.assertEquals('testemail123', response.json()['internalEmail'])
+ self.assertEquals('testemail123', User.objects.get(pk=2).internal_email.email)
+
def test_set_address_to_assigned(self):
"""Not possible to set an assigned email"""
response = self.client.post(
diff --git a/lego/apps/users/tests/test_registration_api.py b/lego/apps/users/tests/test_registration_api.py
--- a/lego/apps/users/tests/test_registration_api.py
+++ b/lego/apps/users/tests/test_registration_api.py
@@ -36,6 +36,15 @@ def test_with_valid_token(self):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('email'), '[email protected]')
+ def test_with_valid_token_and_capitalized_email(self):
+ response = self.client.get(
+ _get_registration_token_url(
+ Registrations.generate_registration_token('[email protected]')
+ )
+ )
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.data.get('email'), '[email protected]')
+
class CreateRegistrationAPITestCase(BaseAPITestCase):
@@ -57,6 +66,16 @@ def test_with_invalid_email(self, *args):
)
self.assertEqual(response.status_code, 400)
+ @mock.patch('lego.apps.users.serializers.registration.verify_captcha', return_value=True)
+ def test_with_capitalized_email(self, mock_verify_captcha):
+ response = self.client.post(
+ _get_list_url(), {
+ 'email': '[email protected]',
+ 'captcha_response': 'testCaptcha'
+ }
+ )
+ self.assertEqual(response.status_code, 202)
+
@mock.patch('lego.apps.users.serializers.registration.verify_captcha', return_value=False)
def test_with_invalid_captcha(self, *args):
response = self.client.post(_get_list_url(), self._test_registration_data)
| EmailAddress case sensitivity
When creating a email address for a user (for gsuite sync) that includes capital letters, the user will be suspended. All input should be lowercased, since that is ehat google is doing.
| 2018-09-27T14:34:58 |
|
webkom/lego | 1,292 | webkom__lego-1292 | [
"1257"
] | bb6b45a047dfcc0f8fdf49386c17483a0a51944a | diff --git a/lego/apps/contact/send.py b/lego/apps/contact/send.py
--- a/lego/apps/contact/send.py
+++ b/lego/apps/contact/send.py
@@ -9,14 +9,12 @@ def send_message(title, message, user, anonymous):
"""
anonymous = anonymous if user.is_authenticated else True
abakus_group = AbakusGroup.objects.get(name='Hovedstyret')
- users = [membership.user for membership in abakus_group.memberships.select_related('user')]
- emails = [user.email_address for user in users]
from_name = 'Anonymous' if anonymous else user.full_name
from_email = 'Unknown' if anonymous else user.email_address
send_email.delay(
- to_email=emails, context={
+ to_email=abakus_group.contact_email, context={
'title': title,
'message': message,
'from_name': from_name,
diff --git a/lego/apps/users/fixtures/initial_abakus_groups.py b/lego/apps/users/fixtures/initial_abakus_groups.py
--- a/lego/apps/users/fixtures/initial_abakus_groups.py
+++ b/lego/apps/users/fixtures/initial_abakus_groups.py
@@ -108,11 +108,13 @@
'text': 'hei'
}, {}
],
- 'Hovedstyret':
- [{
- 'logo_id': 'abakus_hs.png',
- 'permissions': ['/sudo/admin/'],
- }, {}]
+ 'Hovedstyret': [
+ {
+ 'logo_id': 'abakus_hs.png',
+ 'permissions': ['/sudo/admin/'],
+ 'contact_email': "[email protected]"
+ }, {}
+ ]
}
],
'Interessegrupper':
| diff --git a/lego/apps/contact/tests/test_send.py b/lego/apps/contact/tests/test_send.py
--- a/lego/apps/contact/tests/test_send.py
+++ b/lego/apps/contact/tests/test_send.py
@@ -6,6 +6,13 @@
from lego.apps.users.models import User
from lego.utils.test_utils import BaseTestCase
+default_values = {
+ 'from_email': None,
+ 'html_template': 'contact/email/contact_form.html',
+ 'plain_template': 'contact/email/contact_form.txt',
+ 'subject': 'Ny henvendelse fra kontaktskjemaet'
+}
+
class SendTestCase(BaseTestCase):
@@ -16,15 +23,72 @@ class SendTestCase(BaseTestCase):
@mock.patch('lego.apps.contact.send.send_email.delay')
def test_send_anonymous(self, mock_send_email):
- send_message('title', 'message', AnonymousUser(), True)
+ """
+ Send in a contact form as not logged in user, set to be anonymous
+ """
+ anonymus_user = AnonymousUser()
+
+ send_message('title', 'message', anonymus_user, True)
+ mock_send_email.assert_called_with(
+ to_email="[email protected]", context={
+ 'title': 'title',
+ 'message': 'message',
+ 'from_name': "Anonymous",
+ 'from_email': "Unknown"
+ }, **default_values
+ )
mock_send_email.assert_called_once()
@mock.patch('lego.apps.contact.send.send_email.delay')
def test_send_anonymous_user(self, mock_send_email):
- send_message('title', 'message', AnonymousUser(), False)
+ """
+ Send in a contact form as not logged in user
+ """
+ anonymus_user = AnonymousUser()
+
+ send_message('title', 'message', anonymus_user, False)
+ mock_send_email.assert_called_with(
+ to_email="[email protected]", context={
+ 'title': 'title',
+ 'message': 'message',
+ 'from_name': "Anonymous",
+ 'from_email': "Unknown"
+ }, **default_values
+ )
mock_send_email.assert_called_once()
@mock.patch('lego.apps.contact.send.send_email.delay')
def test_send_user(self, mock_send_email):
- send_message('title', 'message', User.objects.first(), False)
+ """
+ Send in a contact form as logged in user, showing name
+ """
+ logged_in_user = User.objects.first()
+
+ send_message('title', 'message', logged_in_user, False)
+ mock_send_email.assert_called_with(
+ to_email="[email protected]", context={
+ 'title': 'title',
+ 'message': 'message',
+ 'from_name': logged_in_user.full_name,
+ 'from_email': logged_in_user.email_address
+ }, **default_values
+ )
+ mock_send_email.assert_called_once()
+
+ @mock.patch('lego.apps.contact.send.send_email.delay')
+ def test_send_user_set_anonymous(self, mock_send_email):
+ """
+ Send in a contact form as logged in user, set to be anonymous
+ """
+ logged_in_user = User.objects.first()
+
+ send_message('title', 'message', logged_in_user, True)
+ mock_send_email.assert_called_with(
+ to_email="[email protected]", context={
+ 'title': 'title',
+ 'message': 'message',
+ 'from_name': "Anonymous",
+ 'from_email': "Unknown"
+ }, **default_values
+ )
mock_send_email.assert_called_once()
| Make contact form send to mail list for HS, instead of individual members
The form currently fetches all members of the HS group, and sends the message to each individual address. This makes it cumbersome to discuss the message for HS, it would be better to send it to their mailing list instead.
| 2018-10-17T19:45:31 |
|
webkom/lego | 1,321 | webkom__lego-1321 | [
"1367"
] | 5c2ace0d42aafcd3e3303f1311a88f0eddb03217 | diff --git a/lego/apps/events/models.py b/lego/apps/events/models.py
--- a/lego/apps/events/models.py
+++ b/lego/apps/events/models.py
@@ -443,6 +443,8 @@ def rebalance_pool(self, from_pool, to_pool):
to_pool_permissions = to_pool.permission_groups.all()
bumped = False
for old_registration in self.registrations.filter(pool=from_pool):
+ if to_pool.is_full:
+ break
moveable = False
user_groups = old_registration.user.all_groups
for group in to_pool_permissions:
| diff --git a/lego/apps/events/tests/test_registrations.py b/lego/apps/events/tests/test_registrations.py
--- a/lego/apps/events/tests/test_registrations.py
+++ b/lego/apps/events/tests/test_registrations.py
@@ -799,6 +799,39 @@ def test_rebalance_pool_method(self):
self.assertEqual(abakus_pool.registrations.count(), 1)
self.assertEqual(webkom_pool.registrations.count(), 2)
+ def test_rebalance_pool_method_should_not_overflow(self):
+ """Test rebalancing method by moving registered user's pool to fit waiting list user"""
+ event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
+ abakus_pool = event.pools.get(name="Abakusmember")
+ webkom_pool = event.pools.get(name="Webkom")
+ users = get_dummy_users(6)
+ for user in users:
+ AbakusGroup.objects.get(name="Abakus").add_user(user)
+
+ webkom_users = users[:3]
+ abakus_users = users[3:]
+
+ for user in webkom_users:
+ registration = Registration.objects.get_or_create(event=event, user=user)[0]
+ event.register(registration)
+
+ self.assertEqual(abakus_pool.registrations.count(), 3)
+ self.assertEqual(webkom_pool.registrations.count(), 0)
+
+ for user in abakus_users:
+ registration = Registration.objects.get_or_create(event=event, user=user)[0]
+ event.register(registration)
+
+ self.assertEqual(abakus_pool.registrations.count(), 3)
+ self.assertEqual(webkom_pool.registrations.count(), 0)
+
+ for user in webkom_users:
+ AbakusGroup.objects.get(name="Webkom").add_user(user)
+
+ event.bump_on_pool_creation_or_expansion()
+ self.assertEqual(abakus_pool.registrations.count(), 3) # Abakus-pool has size 3
+ self.assertEqual(webkom_pool.registrations.count(), 2) # Webkom-pool has size 2
+
def test_cant_register_after_event_has_started(self):
"""Test that a user cannot register after the event has started."""
event = Event.objects.get(title="POOLS_NO_REGISTRATIONS")
| Fix event registration pool overflow
Proof of concept here: https://github.com/webkom/lego/pull/1321/files
| 2018-11-10T00:25:42 |
|
webkom/lego | 1,467 | webkom__lego-1467 | [
"1399"
] | 3cf00ff3ff703c2b336d91d8c987f1f4b8819315 | diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py
--- a/lego/apps/events/serializers/events.py
+++ b/lego/apps/events/serializers/events.py
@@ -145,6 +145,18 @@ def get_attended_count(self, event):
return event.registrations.filter(presence=PRESENT).count()
+class EventUserRegSerializer(EventReadSerializer):
+ user_reg = serializers.SerializerMethodField()
+
+ class Meta:
+ model = Event
+ fields = EventReadSerializer.Meta.fields + ("user_reg",)
+ read_only = True
+
+ def get_user_reg(self, event):
+ return RegistrationReadSerializer(event.user_reg[0]).data
+
+
class EventReadUserDetailedSerializer(EventReadDetailedSerializer):
""" User specfic event serializer that appends data based on request.user """
diff --git a/lego/apps/events/views.py b/lego/apps/events/views.py
--- a/lego/apps/events/views.py
+++ b/lego/apps/events/views.py
@@ -28,6 +28,7 @@
EventReadAuthUserDetailedSerializer,
EventReadSerializer,
EventReadUserDetailedSerializer,
+ EventUserRegSerializer,
populate_event_registration_users_with_grade,
)
from lego.apps.events.serializers.pools import PoolCreateAndUpdateSerializer
@@ -192,14 +193,33 @@ def payment(self, request, *args, **kwargs):
@decorators.action(
detail=False,
- serializer_class=EventReadSerializer,
+ serializer_class=EventUserRegSerializer,
permission_classes=[permissions.IsAuthenticated],
)
def upcoming(self, request):
- queryset = self.get_queryset().filter(
- registrations__status=constants.SUCCESS_REGISTER,
- registrations__user=request.user,
- start_time__gt=timezone.now(),
+ queryset = (
+ self.get_queryset()
+ .filter(
+ registrations__status=constants.SUCCESS_REGISTER,
+ registrations__user=request.user,
+ start_time__gt=timezone.now(),
+ )
+ .prefetch_related(
+ Prefetch(
+ "registrations",
+ queryset=Registration.objects.filter(
+ user=request.user
+ ).select_related("user", "pool"),
+ to_attr="user_reg",
+ ),
+ Prefetch(
+ "pools",
+ queryset=Pool.objects.filter(
+ permission_groups__in=self.request.user.all_groups
+ ),
+ to_attr="possible_pools",
+ ),
+ )
)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
| In profile page, split my upcoming events into wait list and confirmed spot
A little improvement
| 2019-02-25T19:27:41 |
||
webkom/lego | 1,476 | webkom__lego-1476 | [
"1323",
"1234"
] | 5c2ace0d42aafcd3e3303f1311a88f0eddb03217 | diff --git a/lego/apps/search/backends/elasticsearch.py b/lego/apps/search/backends/elasticsearch.py
--- a/lego/apps/search/backends/elasticsearch.py
+++ b/lego/apps/search/backends/elasticsearch.py
@@ -14,10 +14,10 @@ class ElasticsearchBackend(SearchBacked):
connection = None
def set_up(self):
- hosts = getattr(settings, "ELASTICSEARCH", None)
- if hosts:
+ host = getattr(settings, "ELASTICSEARCH", None)
+ if host:
self.connection = Elasticsearch(
- hosts=settings.ELASTICSEARCH, ca_certs=certifi.where()
+ settings.ELASTICSEARCH, ca_certs=certifi.where()
)
def _index_name(self):
diff --git a/lego/settings/development.py b/lego/settings/development.py
--- a/lego/settings/development.py
+++ b/lego/settings/development.py
@@ -76,7 +76,7 @@
CELERY_TASK_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
-ELASTICSEARCH = [{"host": "127.0.0.1"}]
+ELASTICSEARCH = "127.0.0.1"
LDAP_SERVER = "127.0.0.1:389"
LDAP_USER = "cn=admin,dc=abakus,dc=no"
diff --git a/lego/settings/production.py b/lego/settings/production.py
--- a/lego/settings/production.py
+++ b/lego/settings/production.py
@@ -63,7 +63,7 @@
CHANNEL_LAYERS["default"]["CONFIG"] = {"hosts": [env("CHANNELS_REDIS_URL")]}
# Elasticsearch
-ELASTICSEARCH = [{"host": env("ELASTICSEARCH_HOST")}]
+ELASTICSEARCH = env("ELASTICSEARCH_HOST")
SEARCH_INDEX = env("SEARCH_INDEX", default="lego-search")
# Stripe
| diff --git a/lego/settings/test.py b/lego/settings/test.py
--- a/lego/settings/test.py
+++ b/lego/settings/test.py
@@ -52,7 +52,7 @@
}
}
-ELASTICSEARCH = [{"host": "localhost"}]
+ELASTICSEARCH = "localhost"
CELERY_TASK_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
| [Doppins] Upgrade dependency redis to ==3.2.0
Hi!
A new version was just released of `redis`, so [Doppins](https://doppins.com)
has upgraded your project's dependency ranges.
Make sure that it doesn't break anything, and happy merging! :shipit:
---
### Upgraded redis from `==2.10.6` to `==3.0.0`
[Doppins] Upgrade dependency elasticsearch to ==6.3.1
Hi!
A new version was just released of `elasticsearch`, so [Doppins](https://doppins.com)
has upgraded your project's dependency ranges.
Make sure that it doesn't break anything, and happy merging! :shipit:
---
### Upgraded elasticsearch from `==6.2.0` to `==6.3.0`
| An additional new version was just released of `redis`, so we've added an
extra commit upgrading your range to `==3.0.1`.
Hope that's alright!
β [Doppins](https://doppins.com)
An additional new version was just released of `redis`, so we've added an
extra commit upgrading your range to `==3.1.0`.
Hope that's alright!
β [Doppins](https://doppins.com)
An additional new version was just released of `redis`, so we've added an
extra commit upgrading your range to `==3.2.0`.
Hope that's alright!
β [Doppins](https://doppins.com)
An additional new version was just released of `elasticsearch`, so we've added an
extra commit upgrading your range to `==6.3.1`.
Hope that's alright!
β [Doppins](https://doppins.com)
| 2019-03-04T20:57:52 |
webkom/lego | 1,477 | webkom__lego-1477 | [
"1338"
] | cb85b4194e561fb3a81911aa76a27ce5c354a6ae | diff --git a/lego/apps/search/backends/elasticsearch.py b/lego/apps/search/backends/elasticsearch.py
--- a/lego/apps/search/backends/elasticsearch.py
+++ b/lego/apps/search/backends/elasticsearch.py
@@ -14,10 +14,10 @@ class ElasticsearchBackend(SearchBacked):
connection = None
def set_up(self):
- hosts = getattr(settings, "ELASTICSEARCH", None)
- if hosts:
+ host = getattr(settings, "ELASTICSEARCH", None)
+ if host:
self.connection = Elasticsearch(
- hosts=settings.ELASTICSEARCH, ca_certs=certifi.where()
+ settings.ELASTICSEARCH, ca_certs=certifi.where()
)
def _index_name(self):
diff --git a/lego/settings/development.py b/lego/settings/development.py
--- a/lego/settings/development.py
+++ b/lego/settings/development.py
@@ -76,7 +76,7 @@
CELERY_TASK_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
-ELASTICSEARCH = [{"host": "127.0.0.1"}]
+ELASTICSEARCH = "127.0.0.1"
LDAP_SERVER = "127.0.0.1:389"
LDAP_USER = "cn=admin,dc=abakus,dc=no"
diff --git a/lego/settings/production.py b/lego/settings/production.py
--- a/lego/settings/production.py
+++ b/lego/settings/production.py
@@ -63,7 +63,7 @@
CHANNEL_LAYERS["default"]["CONFIG"] = {"hosts": [env("CHANNELS_REDIS_URL")]}
# Elasticsearch
-ELASTICSEARCH = [{"host": env("ELASTICSEARCH_HOST")}]
+ELASTICSEARCH = env("ELASTICSEARCH_HOST")
SEARCH_INDEX = env("SEARCH_INDEX", default="lego-search")
# Stripe
| diff --git a/lego/settings/test.py b/lego/settings/test.py
--- a/lego/settings/test.py
+++ b/lego/settings/test.py
@@ -52,7 +52,7 @@
}
}
-ELASTICSEARCH = [{"host": "localhost"}]
+ELASTICSEARCH = "localhost"
CELERY_TASK_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
| [Doppins] Upgrade dependency channels to ==2.1.6
Hi!
A new version was just released of `channels`, so [Doppins](https://doppins.com)
has upgraded your project's dependency ranges.
Make sure that it doesn't break anything, and happy merging! :shipit:
---
### Upgraded channels from `==2.1.5` to `==2.1.6`
| 2019-03-04T21:52:25 |
|
webkom/lego | 1,505 | webkom__lego-1505 | [
"1504"
] | 1ba588c30cd3c44863bd78430c9bd0e60e4baac6 | diff --git a/lego/apps/events/serializers/events.py b/lego/apps/events/serializers/events.py
--- a/lego/apps/events/serializers/events.py
+++ b/lego/apps/events/serializers/events.py
@@ -62,6 +62,7 @@ class Meta:
"event_type",
"location",
"start_time",
+ "end_time",
"thumbnail",
"total_capacity",
"company",
| Add end_time of an event when getting all events with get request
I want to be able to get the end time of an event when getting all events. I know I can get the end time when getting a specific event, but it is a bit cumbersome.
| 2019-03-18T09:14:23 |
||
webkom/lego | 1,956 | webkom__lego-1956 | [
"1942"
] | aac748a4eb1e7a029825260b56495bbbd550731d | diff --git a/lego/apps/companies/constants.py b/lego/apps/companies/constants.py
--- a/lego/apps/companies/constants.py
+++ b/lego/apps/companies/constants.py
@@ -7,14 +7,16 @@
COURSE = "course"
LUNCH_PRESENTATION = "lunch_presentation"
BEDEX = "bedex"
+DIGITAL_PRESENTATION = "digital_presentation"
OTHER = "other"
SPONSOR = "sponsor"
START_UP = "start_up"
COMPANY_EVENTS = (
(COMPANY_PRESENTATION, COMPANY_PRESENTATION),
- (COURSE, COURSE),
(LUNCH_PRESENTATION, LUNCH_PRESENTATION),
+ (COURSE, COURSE),
+ (DIGITAL_PRESENTATION, DIGITAL_PRESENTATION),
(BEDEX, BEDEX),
(OTHER, OTHER),
(SPONSOR, SPONSOR),
@@ -23,9 +25,10 @@
TRANSLATED_EVENTS = {
COMPANY_PRESENTATION: "Bedriftspresentasjon",
- COURSE: "Kurs",
LUNCH_PRESENTATION: "Lunsjpresentasjon",
- BEDEX: "Bedex",
+ COURSE: "Kurs",
+ DIGITAL_PRESENTATION: "Digital presentasjon",
+ BEDEX: "BedEx (vinter 2021)",
OTHER: "Alternativt arrangement",
START_UP: "Start-up kveld",
}
diff --git a/lego/apps/companies/migrations/0018_auto_20200927_1524.py b/lego/apps/companies/migrations/0018_auto_20200927_1524.py
new file mode 100644
--- /dev/null
+++ b/lego/apps/companies/migrations/0018_auto_20200927_1524.py
@@ -0,0 +1,34 @@
+# Generated by Django 2.2.13 on 2020-09-27 15:24
+
+import django.contrib.postgres.fields
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("companies", "0017_auto_20200224_1519"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="companyinterest",
+ name="events",
+ field=django.contrib.postgres.fields.ArrayField(
+ base_field=models.CharField(
+ choices=[
+ ("company_presentation", "company_presentation"),
+ ("lunch_presentation", "lunch_presentation"),
+ ("course", "course"),
+ ("digital_presentation", "digital_presentation"),
+ ("bedex", "bedex"),
+ ("other", "other"),
+ ("sponsor", "sponsor"),
+ ("start_up", "start_up"),
+ ],
+ max_length=64,
+ ),
+ size=None,
+ ),
+ ),
+ ]
| Update interest form with requirements from Bedkom
As per the email, they want the following changes to the interest form before Monday.
1. We would like to add "Digitalt arrangement" as one of the options
1. Change the "Bedex" alternative to "Bedex (vinter 2021)"
The form they refer to is located here https://abakus.no/interesse
This feature is called `companyInterest` in LEGO frontend and backend.
Start looking here in the frontend
https://github.com/webkom/lego-webapp/tree/master/app/routes/companyInterest
And here in the backend
https://github.com/webkom/lego/blob/873f6898e35ed2d9a33bdfb8339eaf6c9a61470c/lego/apps/companies/models.py#L108
https://github.com/webkom/lego/blob/873f6898e35ed2d9a33bdfb8339eaf6c9a61470c/lego/apps/companies/constants.py#L58
| 2020-09-27T15:15:06 |
||
webkom/lego | 1,985 | webkom__lego-1985 | [
"997"
] | 1e26513cd38584029c25d55be5bcef90ad22b627 | diff --git a/lego/apps/events/notifications.py b/lego/apps/events/notifications.py
--- a/lego/apps/events/notifications.py
+++ b/lego/apps/events/notifications.py
@@ -1,3 +1,7 @@
+from django.utils import timezone
+
+import pytz
+
from lego.apps.notifications.constants import (
EVENT_ADMIN_REGISTRATION,
EVENT_ADMIN_UNREGISTRATION,
@@ -40,12 +44,18 @@ class EventPaymentOverdueNotification(Notification):
def generate_mail(self):
event = self.kwargs["event"]
+ date = timezone.localtime(
+ value=event.payment_due_date, timezone=pytz.timezone("Europe/Oslo")
+ )
+
+ due_date = date.strftime("%d.%m.%y, kl. %H:%M")
+
return self._delay_mail(
to_email=self.user.email,
context={
"event": event.title,
"name": self.user.full_name,
- "due_date": event.payment_due_date,
+ "due_date": due_date,
"id": event.id,
},
subject=f"Du har ikke betalt pΓ₯meldingen pΓ₯ arrangementet {event.title}",
| Timezone email
Format dates in emails in the same language as the email template (Norwegian), and converted to the proper timezone.


| 2020-10-18T14:31:19 |
||
webkom/lego | 2,085 | webkom__lego-2085 | [
"2084"
] | dbae169c28af4ab0e34618577c39c80ff9a9a1f9 | diff --git a/lego/apps/restricted/message_processor.py b/lego/apps/restricted/message_processor.py
--- a/lego/apps/restricted/message_processor.py
+++ b/lego/apps/restricted/message_processor.py
@@ -149,6 +149,9 @@ def decorate(message, hide_sender, sender):
)
else:
footer.append("Opprinnelig avsender har valgt Γ₯ skjule sin adresse.")
+ footer.append(
+ f"Meld deg av her: {settings.FRONTEND_URL}/users/me/settings/notifications"
+ )
footer = "\n".join(footer)
charset = message.get_content_charset() or "us-ascii"
| Emails should contain a link to edit email preferences
Should be to just add a link to the base email template, as well as add a link to mails sent by restricted.
| 2021-03-09T20:05:31 |
||
webkom/lego | 2,269 | webkom__lego-2269 | [
"1916"
] | 070a455bcd52677171f71b3fd12695fe6b6cf970 | diff --git a/lego/apps/events/constants.py b/lego/apps/events/constants.py
--- a/lego/apps/events/constants.py
+++ b/lego/apps/events/constants.py
@@ -85,6 +85,7 @@
PAYMENT_SUCCESS = "succeeded"
PAYMENT_FAILURE = "failed"
PAYMENT_MANUAL = "manual"
+PAYMENT_CANCELED = "canceled"
PAYMENT_STATUS_CHOICES = (
(PAYMENT_MANUAL, PAYMENT_MANUAL),
@@ -95,6 +96,7 @@
STRIPE_EVENT_INTENT_SUCCESS = "payment_intent.succeeded"
STRIPE_EVENT_INTENT_PAYMENT_FAILED = "payment_intent.payment_failed"
+STRIPE_EVENT_INTENT_PAYMENT_CANCELED = "payment_intent.canceled"
STRIPE_EVENT_CHARGE_REFUNDED = "charge.refunded"
# See https://stripe.com/docs/api/payment_intents/object#payment_intent_object-status
diff --git a/lego/apps/events/tasks.py b/lego/apps/events/tasks.py
--- a/lego/apps/events/tasks.py
+++ b/lego/apps/events/tasks.py
@@ -205,6 +205,20 @@ def async_retrieve_payment(self, registration_id, logger_context=None):
)
return
+ # If the payment is canceled in stripe and the webhook for some reason
+ # did not go through, we update the registration to match this, and then
+ # initiate a new payment.
+ if payment_intent["status"] == constants.STRIPE_INTENT_CANCELED:
+ self.registration.payment_status = constants.PAYMENT_CANCELED
+ self.registration.payment_intent_id = None
+ self.registration.payment_idempotency_key = None
+ self.registration.save()
+ chain(
+ async_initiate_payment.s(self.registration.id),
+ save_and_notify_payment.s(self.registration.id),
+ ).delay()
+ return
+
notify_user_payment_initiated(
constants.SOCKET_INITIATE_PAYMENT_SUCCESS,
self.registration,
@@ -351,6 +365,7 @@ def stripe_webhook_event(self, event_id, event_type, logger_context=None):
if event_type in [
constants.STRIPE_EVENT_INTENT_SUCCESS,
constants.STRIPE_EVENT_INTENT_PAYMENT_FAILED,
+ constants.STRIPE_EVENT_INTENT_PAYMENT_CANCELED,
]:
serializer = StripePaymentIntentSerializer(data=event.data["object"])
@@ -380,6 +395,11 @@ def stripe_webhook_event(self, event_id, event_type, logger_context=None):
registration,
error_message="Betaling feilet",
)
+ elif event_type == constants.STRIPE_EVENT_INTENT_PAYMENT_CANCELED:
+ registration.payment_status = constants.PAYMENT_CANCELED
+ registration.payment_intent_id = None
+ registration.idempotency_key = None
+
registration.save()
elif event_type in [constants.STRIPE_EVENT_CHARGE_REFUNDED]:
| diff --git a/lego/apps/events/tests/test_events_api.py b/lego/apps/events/tests/test_events_api.py
--- a/lego/apps/events/tests/test_events_api.py
+++ b/lego/apps/events/tests/test_events_api.py
@@ -1525,7 +1525,8 @@ def test_unregister_with_payment(self):
self.client.delete(f"{_get_registrations_list_url(self.event.id)}{reg.id}/")
self.assertEqual(
- stripe.PaymentIntent.retrieve(reg.payment_intent_id)["status"], "canceled"
+ stripe.PaymentIntent.retrieve(reg.payment_intent_id)["status"],
+ constants.STRIPE_INTENT_CANCELED,
)
def test_admin_unregister_with_payment(self):
@@ -1550,7 +1551,61 @@ def test_admin_unregister_with_payment(self):
reg.refresh_from_db()
self.assertEqual(
- stripe.PaymentIntent.retrieve(reg.payment_intent_id)["status"], "canceled"
+ stripe.PaymentIntent.retrieve(reg.payment_intent_id)["status"],
+ constants.STRIPE_INTENT_CANCELED,
+ )
+
+ stripe_event = stripe.Event.list(limit=1)["data"][0]
+ stripe_webhook_event(
+ event_id=stripe_event["id"], event_type=stripe_event["type"]
+ )
+
+ reg.refresh_from_db()
+ self.assertEqual(reg.payment_status, constants.PAYMENT_CANCELED)
+ self.assertIsNone(reg.payment_intent_id)
+
+ @mock.patch("lego.apps.events.tasks.notify_user_payment_initiated")
+ def test_payment_is_possible_on_re_registration(self, mock_notify):
+ """
+ The user should be able to pay, even if the webhook from stripe on payment cancellation is
+ unsuccessful.
+ - User registers
+ - Payment is initiated, but not completed
+ - User unregisters
+ - User re-registers
+ """
+
+ reg = Registration.objects.get_or_create(
+ event=self.event, user=self.abakus_user_2
+ )[0]
+ self.event.register(reg)
+ self.event.save()
+
+ self.client.force_authenticate(self.abakus_user_2)
+ self.get_payment_intent()
+
+ self.client.delete(f"{_get_registrations_list_url(self.event.id)}{reg.id}/")
+ reg.refresh_from_db()
+ self.assertEqual(
+ stripe.PaymentIntent.retrieve(reg.payment_intent_id)["status"],
+ constants.STRIPE_INTENT_CANCELED,
+ )
+
+ self.event.register(reg)
+ self.get_payment_intent()
+
+ reg.refresh_from_db()
+ stripe_intent = stripe.PaymentIntent.retrieve(reg.payment_intent_id)
+ self.assertNotEqual(
+ stripe_intent["status"],
+ constants.STRIPE_INTENT_CANCELED,
+ )
+
+ mock_notify.assert_called_with(
+ constants.SOCKET_INITIATE_PAYMENT_SUCCESS,
+ reg,
+ success_message="Betaling pΓ₯begynt",
+ client_secret=stripe_intent["client_secret"],
)
def test_cancel_on_payment_manual(self):
| Stripe paymentIntent is not recreated on re-registration
Reproduction steps
- Register for a priced event
- Make sure the payment intent is created. F.ex. by using a card that will be declined
- Unregister from the event
- Re-register, either manually if possible or admin-register
- Try to pay.
### The issue:
The payment intent is `canceled` when unregistering, but the same registration is used when registering again. We only check whether the paymentIntent exists, not whether it is valid. This means that we don't change out the payment intent and we are stuck with a payment intent with a state of `canceled`.
### Proposed solution:
Check the state of the payment intent when creating a payment intent. If it is **not** `requires_payment_method`, we should create a new payment intent.
This check should be used anywhere where we use the `async_create_payment_intent` task or check whether the payment intent exists.
| 2021-08-02T21:12:03 |
|
webkom/lego | 2,331 | webkom__lego-2331 | [
"2310"
] | 3969487d7d5f20b5a23066f2ca8363c0a78c84fc | diff --git a/lego/apps/users/migrations/0029_auto_20210921_1835.py b/lego/apps/users/migrations/0029_auto_20210921_1835.py
new file mode 100644
--- /dev/null
+++ b/lego/apps/users/migrations/0029_auto_20210921_1835.py
@@ -0,0 +1,20 @@
+# Generated by Django 2.2.24 on 2021-09-21 18:35
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("users", "0028_auto_20210523_1252"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="user",
+ name="allergies",
+ field=models.CharField(
+ blank=True, max_length=500, verbose_name="allergies"
+ ),
+ ),
+ ]
diff --git a/lego/apps/users/models.py b/lego/apps/users/models.py
--- a/lego/apps/users/models.py
+++ b/lego/apps/users/models.py
@@ -327,7 +327,7 @@ class User(
)
first_name = models.CharField("first name", max_length=50, blank=False)
last_name = models.CharField("last name", max_length=30, blank=False)
- allergies = models.CharField("allergies", max_length=100, blank=True)
+ allergies = models.CharField("allergies", max_length=500, blank=True)
selected_theme = models.CharField(
"selected theme",
max_length=50,
| Allow more characters for the allergies field
Like it is now, one can not have more than 100 characters in the allergies field for a user (the field in question is found in settings on the profile for a user on abakus.no). It turns out that some users need more space than this to be able to fit all their allergies, so we need to expand the limitation on this field.
Tip: The validation for this field is done backend. A good start could be to search for "allergies" and see what you find then:)
| 2021-09-21T18:42:18 |
||
webkom/lego | 2,342 | webkom__lego-2342 | [
"2028"
] | b4adadb56de79ba54cbff2786000f07729cf9d62 | diff --git a/lego/apps/users/serializers/registration.py b/lego/apps/users/serializers/registration.py
--- a/lego/apps/users/serializers/registration.py
+++ b/lego/apps/users/serializers/registration.py
@@ -41,4 +41,5 @@ class Meta:
"gender",
"password",
"allergies",
+ "phone_number",
)
| Phone number not saved from registration form
When creating a new user, LEGO ignores the phone number inserted into the registration form.
| I have take a look at the users registrations (is this?)
(https://github.com/webkom/lego/blob/master/lego/apps/users/registrations.pyl)
but I can't find any mention to the phone number. | 2021-10-05T22:42:01 |
|
webkom/lego | 2,560 | webkom__lego-2560 | [
"1299"
] | 247e58d369fb9adb0bd3c12116c0a2db5289c61e | diff --git a/lego/apps/restricted/notifications.py b/lego/apps/restricted/notifications.py
--- a/lego/apps/restricted/notifications.py
+++ b/lego/apps/restricted/notifications.py
@@ -8,7 +8,7 @@ class RestrictedMailSentNotification(Notification):
def generate_mail(self):
return self._delay_mail(
- to_email=self.user.email,
+ to_email=self.user.email.address,
context={"first_name": self.user.first_name},
subject="Begrenset epost sendt ut",
plain_template="restricted/email/process_success.txt",
diff --git a/lego/apps/restricted/serializers.py b/lego/apps/restricted/serializers.py
--- a/lego/apps/restricted/serializers.py
+++ b/lego/apps/restricted/serializers.py
@@ -1,3 +1,7 @@
+from functools import reduce
+
+from rest_framework import exceptions
+
from lego.apps.events.fields import PublicEventListField
from lego.apps.meetings.fields import MeetingListField
from lego.apps.restricted.models import RestrictedMail
@@ -28,6 +32,18 @@ class Meta(RestrictedMailListSerializer.Meta):
"hide_sender",
)
+ def create(self, validated_data):
+ groups = validated_data["groups"]
+ events = validated_data["events"]
+ MaxPermittedAmout = 500
+ num = reduce((lambda a, b: a + b.number_of_users), groups, 0)
+ num += reduce((lambda a, b: a + b.registration_count), events, 0)
+ if num > MaxPermittedAmout:
+ raise exceptions.ValidationError(
+ f"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}"
+ )
+ return super().create(validated_data)
+
class RestrictedMailDetailSerializer(RestrictedMailSerializer):
users = PublicUserListField({"read_only": True})
| RestrictedMail notification
> Restricted mail is used when sending mails to multiple users at once by selecting users/events/meetings, and then send the email to <[email protected]> together with the token.
The `restricted mail sent` should be sent to the proper email, not the `user.email` field. The address `user.email_address` should be used instead.
If the `from_address` is not the same as the `user.email_address`, both should receive the mail.
https://github.com/webkom/lego/blob/ccab14fbee223f16842ace6ca2ba0c2f3ac3ac86/lego/apps/restricted/notifications.py#L9
| 2022-03-10T13:35:32 |
||
webkom/lego | 2,772 | webkom__lego-2772 | [
"2744"
] | 64654701aeb13ce3be73aea1312355c8c6d8a0c8 | diff --git a/lego/apps/events/serializers/registrations.py b/lego/apps/events/serializers/registrations.py
--- a/lego/apps/events/serializers/registrations.py
+++ b/lego/apps/events/serializers/registrations.py
@@ -161,6 +161,7 @@ class RegistrationReadDetailedExportSerializer(RegistrationReadDetailedSerialize
class StripeMetaSerializer(serializers.Serializer):
EVENT_ID = serializers.IntegerField()
+ USER_ID = serializers.IntegerField()
USER = serializers.CharField()
EMAIL = serializers.EmailField()
diff --git a/lego/apps/events/tasks.py b/lego/apps/events/tasks.py
--- a/lego/apps/events/tasks.py
+++ b/lego/apps/events/tasks.py
@@ -395,7 +395,7 @@ def stripe_webhook_event(self, event_id, event_type, logger_context=None):
metadata = serializer.data["metadata"]
registration = Registration.objects.filter(
- event_id=metadata["EVENT_ID"], user__email=metadata["EMAIL"]
+ event_id=metadata["EVENT_ID"], user__id=metadata["USER_ID"]
).first()
if not registration:
log.error("stripe_webhook_error", event_id=event_id, metadata=metadata)
@@ -430,7 +430,7 @@ def stripe_webhook_event(self, event_id, event_type, logger_context=None):
metadata = serializer.data["metadata"]
registration = Registration.objects.filter(
- event_id=metadata["EVENT_ID"], user__email=metadata["EMAIL"]
+ event_id=metadata["EVENT_ID"], user__id=metadata["USER_ID"]
).first()
if not registration:
log.error("stripe_webhook_error", event_id=event_id, metadata=metadata)
| Stripe payments should link to user ID
At the moment, registrations are selected using the event id and the user email. Since users can change their email address, this association is not stable. We should instead select the registration using the user id, as this is already stored in the payment metadata.
Sentry Issue: [LEGO-AP](https://sentry.io/organizations/abakus/issues/3330779750/?referrer=github_integration)
```
WebhookDidNotFindRegistration: Stripe webhook with ID: evt_3Kxqk6LWnPJjOd3f0zPgIQSx for event 3122 tried getting registration for user <REDACTED>, but did not find any!
File "celery/app/trace.py", line 734, in __protected_call__
return self.run(*args, **kwargs)
File "lego/apps/events/tasks.py", line 437, in stripe_webhook_event
raise WebhookDidNotFindRegistration(event_id, metadata)
```
| 2022-07-07T13:38:56 |
||
webkom/lego | 3,013 | webkom__lego-3013 | [
"3005"
] | 315b310f30b53827fef7770b69fc5a75c416dc4b | diff --git a/lego/settings/base.py b/lego/settings/base.py
--- a/lego/settings/base.py
+++ b/lego/settings/base.py
@@ -174,7 +174,7 @@
LDAP_BASE_DN = "dc=abakus,dc=no"
-CAPTCHA_URL = "https://www.google.com/recaptcha/api/siteverify"
+CAPTCHA_URL = "https://challenges.cloudflare.com/turnstile/v0/siteverify"
PUSH_NOTIFICATIONS_SETTINGS = {
"APNS_USE_SANDBOX": False,
diff --git a/lego/settings/development.py b/lego/settings/development.py
--- a/lego/settings/development.py
+++ b/lego/settings/development.py
@@ -17,9 +17,7 @@
stripe.api_key = os.environ.get("STRIPE_TEST_KEY")
STRIPE_WEBHOOK_SECRET = os.environ.get("STRIPE_WEBHOOK_SECRET")
-CAPTCHA_KEY = (
- os.environ.get("CAPTCHA_KEY") or "6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe"
-)
+CAPTCHA_KEY = os.environ.get("CAPTCHA_KEY") or "1x0000000000000000000000000000000AA"
SESSION_COOKIE_SECURE = False
DATABASES = {
| Migrate from CAPTCHA to Turnstile
Turnstile
* is not from *Google*.
* doesn't force users to help train Google's car AI.
* easy (?) to [migrate to](https://developers.cloudflare.com/turnstile/get-started/migrating-from-recaptcha/).
* solves the stupid issue with showing captchas so early they expire (Bc. a Turnstile token is valid for 300 seconds).
Closes #2291
| Another alternative is also to use hCaptcha, which is very widely used and tested. https://www.hcaptcha.com/
It _does_ help train AIs as well, just not for a single entity, but anyone can use the results.
_And you donate to charity by using it_
HCaptcha is fine, but it has the same horrible ux when it thinks you're a bot or adblocker trips it up. | 2022-10-19T20:57:08 |
|
webkom/lego | 3,128 | webkom__lego-3128 | [
"3127"
] | d3def409d21e93960fdab10c38990d8554054cd9 | diff --git a/lego/apps/email/tasks.py b/lego/apps/email/tasks.py
--- a/lego/apps/email/tasks.py
+++ b/lego/apps/email/tasks.py
@@ -93,6 +93,7 @@ def create_weekly_mail(user):
if todays_weekly is None
else todays_weekly.get_absolute_url(),
"joblistings": joblistings,
+ "frontend_url": settings.FRONTEND_URL,
},
)
if events or joblistings or todays_weekly:
| Broken link on weekly mails
The link used to unsubscribe from the mail is broken, because `frontend_url` is undefined. Probably due to the weekly mails being handled differently than all other notifications.
| 2022-12-17T19:01:31 |
||
mosaicml/llm-foundry | 169 | mosaicml__llm-foundry-169 | [
"67"
] | d691eb3dac7fb74f6a39ef9133341f1c839a1416 | diff --git a/scripts/inference/mpt_ckpt_to_ft.py b/scripts/inference/mpt_ckpt_to_ft.py
new file mode 100644
--- /dev/null
+++ b/scripts/inference/mpt_ckpt_to_ft.py
@@ -0,0 +1,323 @@
+# Copyright 2022 MosaicML LLM Foundry authors
+# SPDX-License-Identifier: Apache-2.0
+
+# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Convert MPT model checkpoint to FT format.
+
+It's a modified version of
+https://github.com/NVIDIA/FasterTransformer/blob/main/examples/pytorch/gpt/utils/huggingface_gpt_convert.py
+"""
+
+import argparse
+import configparser
+import os
+from typing import Any, Dict, List
+
+import numpy as np
+import torch
+import transformers
+
+
+def get_weight_data_type(data_type: str):
+ if data_type == 'fp32':
+ return np.float32
+ elif data_type == 'fp16':
+ return np.float16
+ else:
+ raise RuntimeError('Unsupported data type: {data_type} for conversion')
+
+
+def write_zero_bias(weight_name: str, weight_file_path: str,
+ bias_shape: List[int]) -> None:
+ """Write zeros for bias.
+
+ MPT model might not have bias while FT expects bias.
+
+ Args:
+ weight_name (str): Name of the weight tensor.
+ weight_file_path (str): Output path for storing the weight (NOT zero bias).
+ bias_shape (List[int]): Shape of the bias array.
+ """
+ if 'weight' not in weight_file_path:
+ raise RuntimeError(
+ f'Cannot write zero bias for {weight_name}. Input is not a weight tensor'
+ )
+ print(f'zero bias for weight: {weight_name}')
+ bias_file_path = weight_file_path.replace('.weight', '.bias')
+ bias = np.zeros(bias_shape, dtype=np.float32)
+ bias.tofile(bias_file_path)
+
+
+def convert_weight_to_ft_each(save_dir: str, infer_gpu_num: int,
+ tensor_name: str, config: Dict[str, Any],
+ data: np.ndarray):
+ """Convert an MPT checkpoint to a FasterTransformer compatible format.
+
+ Args:
+ save_dir (str): Path of the directory to save the weight in FT format. The directory must already exist.
+ infer_gpu_num (int): The number of gpus you are planning to use for inference.
+ tensor_name (str): Name of the weight tensor. Used in naming the output file.
+ config (Dict[str, Any]): Configuration for the model. This is used in getting model specific parameters.
+ data (np.ndarray): Tensor data in np.ndarray format.
+
+ Returns:
+ None: Writes to a file in `save_dir`. File name is based on the `tensor_name`
+ """
+ if tensor_name.find('input_layernorm.weight') != -1 or tensor_name.find('input_layernorm.bias') != -1 or \
+ tensor_name.find('attention.dense.bias') != -1 or tensor_name.find('post_attention_layernorm.weight') != -1 or \
+ tensor_name.find('post_attention_layernorm.bias') != -1 or tensor_name.find('mlp.dense_4h_to_h.bias') != -1 or \
+ tensor_name.find('final_layernorm.weight') != -1 or tensor_name.find('final_layernorm.bias') != -1:
+
+ save_path = os.path.join(save_dir, f'model.{tensor_name}.bin')
+ data.tofile(save_path)
+ if 'weight' in tensor_name and config['no_bias']:
+ write_zero_bias(tensor_name, save_path, data.shape[-1])
+
+ elif tensor_name.find('attention.dense.weight') != -1:
+ assert data.shape == (
+ config['d_model'],
+ config['d_model']), f'unexpected dim for {tensor_name}'
+ # nn.Linear weights are transposed
+ data = data.T
+ split_vals = np.split(data, infer_gpu_num, axis=0)
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir, f'model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+ if config['no_bias']:
+ fake_weight_path = os.path.join(save_dir,
+ f'model.{tensor_name}.bin')
+ write_zero_bias(tensor_name, fake_weight_path, data.shape[-1])
+
+ elif tensor_name.find('mlp.dense_4h_to_h.weight') != -1:
+ assert data.shape == (
+ config['d_model'], config['expansion_ratio'] *
+ config['d_model']), f'unexpected dim for {tensor_name}'
+ # nn.Linear weights are transposed
+ data = data.T
+ split_vals = np.split(data, infer_gpu_num, axis=0)
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir, f'model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+ if config['no_bias']:
+ fake_weight_path = os.path.join(save_dir,
+ f'model.{tensor_name}.bin')
+ write_zero_bias(tensor_name, fake_weight_path, data.shape[-1])
+
+ elif tensor_name.find('mlp.dense_h_to_4h.weight') != -1:
+ assert data.shape == (
+ config['expansion_ratio'] * config['d_model'],
+ config['d_model']), f'unexpected dim for {tensor_name}'
+ # nn.Linear weights are transposed
+ data = data.T
+
+ split_vals = np.split(data, infer_gpu_num, axis=-1)
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir, f'/model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+ if config['no_bias']:
+ write_zero_bias(tensor_name, save_path, split_vals[j].shape[-1])
+
+ elif tensor_name.find('mlp.dense_h_to_4h.bias') != -1:
+ assert data.shape == (
+ config['expansion_ratio'] *
+ config['d_model'],), f'unexpected dim for {tensor_name}'
+ split_vals = np.split(data, infer_gpu_num, axis=-1)
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir + f'model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+
+ elif tensor_name.find('attention.query_key_value.bias') != -1:
+ assert data.shape == (
+ 3 * config['d_model'],), f'unexpected dim for {tensor_name}'
+
+ data = data.reshape(3, config['d_model'])
+
+ split_vals = np.split(data, infer_gpu_num, axis=-1)
+
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir, f'model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+
+ elif tensor_name.find('attention.query_key_value.weight') != -1:
+ assert data.shape == (
+ 3 * config['d_model'],
+ config['d_model']), f'unexpected dim for {tensor_name}'
+ # nn.Linear weights are transposed
+ data = data.T
+
+ data = data.reshape(config['d_model'], 3, config['d_model'])
+ split_vals = np.split(data, infer_gpu_num, axis=-1)
+
+ for j in range(infer_gpu_num):
+ save_path = os.path.join(save_dir, f'model.{tensor_name}.{j}.bin')
+ split_vals[j].tofile(save_path)
+ if config['no_bias']:
+ write_zero_bias(tensor_name, save_path,
+ (3, split_vals[j].shape[-1]))
+
+ else:
+ raise RuntimeError(f'Tensor with name {tensor_name} is not handled')
+
+
+def convert_mpt_to_ft(model_name_or_path: str, output_dir: str,
+ infer_gpu_num: int, weight_data_type: str) -> None:
+ """Convert an MPT checkpoint to a FasterTransformer compatible format.
+
+ Args:
+ model_name_or_path (str): The HF hub name of the model (e.g., mosaicml/mpt-7b) or the path of a directory
+ containing an MPT checkpoint in a local dir.
+ output_dir (str): Path of the directory to save the checkpoint in FT format. The directory must not already exist.
+ infer_gpu_num (int): The number of gpus you are planning to use for inference.
+ weight_data_type (str): Data type of the weights in the input checkpoint.
+ """
+ save_dir = os.path.join(output_dir, f'{infer_gpu_num}-gpu')
+
+ if (os.path.exists(save_dir) == False):
+ os.makedirs(save_dir)
+ else:
+ raise RuntimeError(f'Output path {save_dir} already exists!')
+
+ # do conversion on cpu
+ torch_device = 'cpu'
+
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+ model_name_or_path, trust_remote_code=True).to(torch_device)
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)
+
+ hf_config = vars(model.config)
+
+ config = configparser.ConfigParser()
+ config['gpt'] = {}
+ try:
+ config['gpt']['model_name'] = 'mpt' if hf_config[
+ '_name_or_path'] == '' else hf_config['_name_or_path']
+ config['gpt']['head_num'] = str(hf_config['n_heads'])
+ n_embd = hf_config['d_model']
+ config['gpt']['size_per_head'] = str(n_embd // hf_config['n_heads'])
+ config['gpt']['inter_size'] = str(n_embd * hf_config['expansion_ratio'])
+ config['gpt']['max_pos_seq_len'] = str(hf_config['max_seq_len'])
+ config['gpt']['num_layer'] = str(hf_config['n_layers'])
+ config['gpt']['vocab_size'] = str(hf_config['vocab_size'])
+ config['gpt']['start_id'] = str(
+ hf_config['bos_token_id']
+ ) if hf_config['bos_token_id'] != None else str(tokenizer.bos_token_id)
+ config['gpt']['end_id'] = str(
+ hf_config['eos_token_id']
+ ) if hf_config['eos_token_id'] != None else str(tokenizer.eos_token_id)
+ config['gpt']['weight_data_type'] = weight_data_type
+ config['gpt']['tensor_para_size'] = str(infer_gpu_num)
+ # nn.LayerNorm default eps is 1e-5
+ config['gpt']['layernorm_eps'] = str(1e-5)
+ if hf_config['attn_config']['alibi']:
+ config['gpt']['has_positional_encoding'] = str(False)
+ config['gpt']['use_attention_linear_bias'] = str(True)
+
+ with open(save_dir + '/config.ini', 'w') as configfile:
+ config.write(configfile)
+ except:
+ print(f'Failed to save the config in config.ini.')
+ raise
+
+ np_weight_data_type = get_weight_data_type(weight_data_type)
+
+ param_remapping = {
+ 'norm_1.bias': 'input_layernorm.bias',
+ 'norm_1.weight': 'input_layernorm.weight',
+ 'attn.Wqkv.bias': 'attention.query_key_value.bias',
+ 'attn.Wqkv.weight': 'attention.query_key_value.weight',
+ 'attn.out_proj.bias': 'attention.dense.bias',
+ 'attn.out_proj.weight': 'attention.dense.weight',
+ 'norm_2.bias': 'post_attention_layernorm.bias',
+ 'norm_2.weight': 'post_attention_layernorm.weight',
+ 'ffn.up_proj.bias': 'mlp.dense_h_to_4h.bias',
+ 'ffn.up_proj.weight': 'mlp.dense_h_to_4h.weight',
+ 'ffn.down_proj.bias': 'mlp.dense_4h_to_h.bias',
+ 'ffn.down_proj.weight': 'mlp.dense_4h_to_h.weight',
+ }
+
+ for name, param in model.named_parameters():
+ print(f'Working on parameter {name} ...')
+ data = param.detach().cpu().numpy().astype(np_weight_data_type)
+ if name.find('weight') == -1 and name.find('bias') == -1:
+ print(f'found a parameter name that is not handled: {name}')
+ continue
+ if name == 'transformer.wpe.weight':
+ assert data.shape == (
+ hf_config['max_seq_len'],
+ hf_config['d_model']), f'unexpected dim for {name}'
+ data.tofile(save_dir + 'model.wpe.bin')
+ elif name == 'transformer.wte.weight':
+ assert data.shape == (
+ hf_config['vocab_size'],
+ hf_config['d_model']), f'unexpected dim for {name}'
+ data.tofile(save_dir + 'model.wte.bin')
+ elif name == 'transformer.norm_f.bias':
+ assert data.shape == (
+ hf_config['d_model'],), f'unexpected dim for {name}'
+ data.tofile(save_dir + 'model.final_layernorm.bias.bin')
+ elif name == 'transformer.norm_f.weight':
+ assert data.shape == (
+ hf_config['d_model'],), f'unexpected dim for {name}'
+ save_path = save_dir + 'model.final_layernorm.weight.bin'
+ data.tofile(save_path)
+ if hf_config['no_bias']:
+ write_zero_bias(name, save_path, data.shape[-1])
+ elif name == 'transformer.lm_head.weight':
+ data.tofile(save_dir + 'model.lm_head.weight.bin')
+ else:
+ for mpt_pattern, ft_pattern in param_remapping.items():
+ if name.find(mpt_pattern) != -1:
+ new_name = name.replace('transformer.blocks.',
+ 'layers.').replace(
+ mpt_pattern, ft_pattern)
+ convert_weight_to_ft_each(save_dir, infer_gpu_num, new_name,
+ hf_config, data)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('--save_dir',
+ '-o',
+ type=str,
+ help='Directory to save converted checkpoint in',
+ required=True)
+ parser.add_argument(
+ '--name_or_dir',
+ '-i',
+ type=str,
+ help=
+ 'HF hub Model name (e.g., mosaicml/mpt-7b) or local dir path to load checkpoint from',
+ required=True)
+ parser.add_argument('--infer_gpu_num',
+ '-i_g',
+ type=int,
+ help='How many gpus for inference?',
+ required=True)
+ parser.add_argument('--weight_data_type',
+ type=str,
+ help='Data type of weights in the input checkpoint',
+ default='fp32',
+ choices=['fp32', 'fp16'])
+
+ args = parser.parse_args()
+ print('\n=============== Argument ===============')
+ for key in vars(args):
+ print('{}: {}'.format(key, vars(args)[key]))
+ print('========================================')
+
+ convert_mpt_to_ft(args.name_or_dir, args.save_dir, args.infer_gpu_num,
+ args.weight_data_type)
| FasterTransformer
Hi, I saw in mpt model card that the models could run with FasterTransformer
I didn't find any details about that anywhere
can you guys share the conversion scripts or help there?
Thanks
| MPT is a GPT style network
You'd want to create a conversion script, similar to [this one](https://github.com/NVIDIA/FasterTransformer/blob/c6e8f60ec40da218804a60e6aa986903e7fa8594/examples/pytorch/gpt/utils/huggingface_gpt_convert.py), for converting the MPT HF model into the FT format.
When we write it, it'll probably land in the [llm-foundry/scripts/misc/](https://github.com/mosaicml/llm-foundry/tree/main/scripts/misc) folder (or be directly contributed to FT).
Thanks @vchiley
AFAIK it is using AliBi and some other things that aren't native in GPT FT version
maybe Bloom is more similar? and from playing with mpt around in HF version and try base as GPT / Bloom (by renaming and loading state dicts) I get nonsense so I wonder if there is some other impl. that prevents it straight forward such as other ordering in QKV layers or etc.
wdyt ?
Thanks !!
[The `*.c_*.*` naming](https://github.com/NVIDIA/FasterTransformer/blob/c6e8f60ec40da218804a60e6aa986903e7fa8594/examples/pytorch/gpt/utils/huggingface_gpt_convert.py#L135) makes me think they use 1x1 conv layers instead of linear layers (functionally the same thing, for some reason early transformer implementations use to do this; e.g. [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L156)).
A 1x1 conv and a linear layers are functionally the same thing, but the weight tensors are transposes of one another.
Try transposing the MPT weights before loading them into the FT conversion script π€·ββοΈ
Any updates on this? As all MPT model cards describe the built-in optimization for FlashAttention and FasterTransformer, I am curious why the FasterTransformer part hasn't been tested before release?
Or was it and you just didn't come to write a conversion script?
Also, what about the Transformer Engine of H100's? How easy/difficult is it to make the model work with that? (FP8)
> Transformer Engine
We've played around with [TE and H100 FP8 support](https://www.mosaicml.com/blog/coreweave-nvidia-h100-part-1)
It works and we'll include everything when we have more seat time with H100s so we can test everything more thoroughly.
@vchiley Thank you so much! ππ
I also need the FT conversion script; it would be super helpful for me π₯°
@vchiley You are also talking about uzing TransformerEngine in inference right?
This would be extremely helpful for me too!
@xgal, @SinanAkkoyun, @meitalbensinai, @therealadityashankar
We will soon add conversion and run scripts
I'd greatly appreciate that so so much! | 2023-05-18T23:11:38 |
|
mosaicml/llm-foundry | 704 | mosaicml__llm-foundry-704 | [
"703"
] | 9027f49153d89e6b0b225af3626311a9b4658dbf | diff --git a/llmfoundry/models/utils/hf_prefixlm_converter.py b/llmfoundry/models/utils/hf_prefixlm_converter.py
--- a/llmfoundry/models/utils/hf_prefixlm_converter.py
+++ b/llmfoundry/models/utils/hf_prefixlm_converter.py
@@ -10,31 +10,14 @@
and treat the input prompt as the prefix in `generate`.
"""
-import math
-import warnings
from types import MethodType
from typing import Any, List, MutableMapping, Optional, Tuple, Union
import torch
-from transformers.models.bloom.modeling_bloom import (
- BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel,
- CausalLMOutputWithCrossAttentions, CrossEntropyLoss)
-from transformers.models.bloom.modeling_bloom import \
- _expand_mask as _expand_mask_bloom
-from transformers.models.bloom.modeling_bloom import \
- _make_causal_mask as _make_causal_mask_bloom
-from transformers.models.bloom.modeling_bloom import logging
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
-from transformers.models.opt.modeling_opt import OPTForCausalLM
-from transformers.models.opt.modeling_opt import \
- _expand_mask as _expand_mask_opt
-from transformers.models.opt.modeling_opt import \
- _make_causal_mask as _make_causal_mask_opt
-
-logger = logging.get_logger(__name__)
_SUPPORTED_GPT_MODELS = (
GPT2LMHeadModel,
@@ -223,583 +206,10 @@ def generate(self: CAUSAL_GPT_TYPES, *args: Any, **kwargs: Any):
return model
-def _convert_bloom_causal_lm_to_prefix_lm(
- model: BloomForCausalLM) -> BloomForCausalLM:
- """Converts a BLOOM Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `BloomForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
-
- assert isinstance(model, BloomForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports BLOOM decoder-only models'
-
- # Modified from transformers.models.bloom.modeling_bloom.BloomModel._prepare_attn_mask
- # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/bloom/modeling_bloom.py#L648
- def _prepare_attn_mask(
- self: BloomModel,
- attention_mask: torch.Tensor,
- bidirectional_mask: Optional[torch.Tensor],
- input_shape: Tuple[int, int],
- past_key_values_length: int,
- ) -> torch.BoolTensor:
- # create causal mask
- # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
- combined_attention_mask = None
- device = attention_mask.device
- _, src_length = input_shape
-
- if src_length > 1:
- combined_attention_mask = _make_causal_mask_bloom(
- input_shape,
- device=device,
- past_key_values_length=past_key_values_length)
- # Make use of the batch-specific `bidirectional_mask` attribute set
- # by the parent module in its (new) `forward` method wrapper
- if bidirectional_mask is not None:
- # The two masks should have the same size
- assert attention_mask.shape == bidirectional_mask.shape
-
- # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
- expanded_bidirectional_mask = _expand_mask_bloom(
- bidirectional_mask, tgt_length=src_length)
- combined_attention_mask = torch.logical_and(
- combined_attention_mask, expanded_bidirectional_mask)
-
- # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
- expanded_attn_mask = _expand_mask_bloom(attention_mask,
- tgt_length=src_length)
- combined_attention_mask = (expanded_attn_mask
- if combined_attention_mask is None else
- expanded_attn_mask | combined_attention_mask)
-
- return combined_attention_mask
-
- # Modified from transformers.models.bloom.modeling_bloom._prepare_alibi_transformer
- # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/bloom/modeling_bloom.py#L87
- def _build_alibi_tensor(
- self: BloomModel,
- batch_size: int,
- query_length: int,
- key_length: int,
- dtype: torch.dtype,
- device: torch.device,
- ) -> torch.Tensor:
- num_heads = self.config.n_head
-
- closest_power_of_2 = 2**math.floor(math.log2(num_heads))
- base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))),
- device=device,
- dtype=torch.float32)
- powers = torch.arange(1,
- 1 + closest_power_of_2,
- device=device,
- dtype=torch.int32)
- slopes = torch.pow(base, powers)
-
- if closest_power_of_2 != num_heads:
- extra_base = torch.tensor(
- 2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
- device=device,
- dtype=torch.float32)
- num_remaining_heads = min(closest_power_of_2,
- num_heads - closest_power_of_2)
- extra_powers = torch.arange(1,
- 1 + 2 * num_remaining_heads,
- 2,
- device=device,
- dtype=torch.int32)
- slopes = torch.cat(
- [slopes, torch.pow(extra_base, extra_powers)], dim=0)
-
- qa = torch.arange(query_length, device=device,
- dtype=torch.int32).view(-1, 1)
- ka = torch.arange(key_length, device=device,
- dtype=torch.int32).view(1, -1)
- diffs = qa - ka + key_length - query_length
- diffs = -diffs.abs()
- alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(
- 1, 1, query_length, key_length)
- alibi = alibi.expand(batch_size, -1, -1,
- -1).reshape(-1, query_length, key_length)
- return alibi.to(dtype)
-
- # Modified from transformers.models.bloom.modeling_bloom.BloomModel.forward
- # Note: The modified code is surrounded with #### START/END #### comments
- # and one new argument (`bidirectional_mask`) is added to the signature.
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def transformer_forward(
- self: BloomModel,
- input_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
- attention_mask: Optional[torch.Tensor] = None,
- bidirectional_mask: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.LongTensor] = None,
- inputs_embeds: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- **deprecated_arguments: Any
- ) -> Union[Tuple[torch.Tensor, ...],
- BaseModelOutputWithPastAndCrossAttentions]:
- if deprecated_arguments.pop('position_ids', False) is not False:
- # `position_ids` could have been `torch.Tensor` or `None` so
- # defaulting pop to `False` allows to detect if users were
- # passing explicitly `None`
- warnings.warn(
- '`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. ' +\
- 'You can safely ignore passing `position_ids`.',
- FutureWarning,
- )
- if len(deprecated_arguments) > 0:
- raise ValueError(
- f'Got unexpected arguments: {deprecated_arguments}')
-
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (output_hidden_states
- if output_hidden_states is not None else
- self.config.output_hidden_states)
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError(
- 'You cannot specify both input_ids and inputs_embeds at the same time'
- )
- elif input_ids is not None:
- batch_size, seq_length = input_ids.shape
- elif inputs_embeds is not None:
- batch_size, seq_length, _ = inputs_embeds.shape
- else:
- raise ValueError(
- 'You have to specify either input_ids or inputs_embeds')
-
- if past_key_values is None:
- past_key_values = tuple([None] * len(self.h)) # type: ignore
-
- # Prepare head mask if needed
- # 1.0 in head_mask indicate we keep the head
- # attention_probs has shape batch_size x num_heads x N x N
- # head_mask has shape n_layer x batch x num_heads x N x N
- head_mask = self.get_head_mask(head_mask, self.config.n_layer)
-
- if inputs_embeds is None:
- inputs_embeds = self.word_embeddings(input_ids)
-
- hidden_states = self.word_embeddings_layernorm(inputs_embeds)
-
- presents = () if use_cache else None
- all_self_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
-
- # Compute alibi tensor: check build_alibi_tensor documentation
- seq_length_with_past = seq_length
- past_key_values_length = 0
- if past_key_values[0] is not None: # type: ignore
- tmp = past_key_values[0][0] # type: ignore
- past_key_values_length = tmp.shape[2] # type: ignore
- seq_length_with_past = seq_length_with_past + past_key_values_length
- if attention_mask is None:
- attention_mask = torch.ones((batch_size, seq_length_with_past),
- device=hidden_states.device)
- else:
- attention_mask = attention_mask.to(hidden_states.device)
-
- ##### ALL NON-SIGNATURE MODIFICATIONS ARE CONTAINED TO THIS BLOCK [STARTS HERE] #####
- alibi = self._build_alibi_tensor(
- batch_size=batch_size,
- query_length=seq_length,
- key_length=seq_length_with_past,
- dtype=hidden_states.dtype,
- device=hidden_states.device,
- )
-
- causal_mask = self._prepare_attn_mask(
- attention_mask,
- bidirectional_mask,
- input_shape=(batch_size, seq_length),
- past_key_values_length=past_key_values_length,
- )
- ##### ALL NON-SIGNATURE MODIFICATIONS ARE CONTAINED TO THIS BLOCK [ENDS HERE] #####
-
- for i, (block,
- layer_past) in enumerate(zip(self.h,
- past_key_values)): # type: ignore
-
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst # type: ignore
-
- if self.gradient_checkpointing and self.training:
-
- if use_cache:
- logger.warning(
- '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
- )
- use_cache = False
-
- def create_custom_forward(module: torch.nn.Module):
-
- def custom_forward(*inputs: Any):
- # None for past_key_value
- return module(*inputs,
- use_cache=use_cache,
- output_attentions=output_attentions)
-
- return custom_forward
-
- outputs = torch.utils.checkpoint.checkpoint( # type: ignore
- create_custom_forward(block),
- hidden_states,
- alibi,
- causal_mask,
- head_mask[i], # type: ignore
- )
- else:
- outputs = block(
- hidden_states,
- layer_past=layer_past,
- attention_mask=causal_mask,
- head_mask=head_mask[i], # type: ignore
- use_cache=use_cache,
- output_attentions=output_attentions,
- alibi=alibi,
- )
-
- hidden_states = outputs[0]
- if use_cache is True:
- presents = presents + (outputs[1],) # type: ignore
-
- if output_attentions:
- oa = (outputs[2 if use_cache else 1],) # type: ignore
- all_self_attentions = all_self_attentions + oa # type: ignore
-
- # Add last hidden state
- hidden_states = self.ln_f(hidden_states)
-
- if output_hidden_states:
- hst = (hidden_states,)
- all_hidden_states = all_hidden_states + hst # type: ignore
-
- if not return_dict:
- return tuple(v for v in [
- hidden_states, presents, all_hidden_states, all_self_attentions
- ] if v is not None)
-
- return BaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- past_key_values=presents,
- hidden_states=all_hidden_states,
- attentions=all_self_attentions,
- )
-
- # Make it so model.transformer has the new helper methods and new
- # `forward` method
- setattr(model.transformer, '_prepare_attn_mask',
- MethodType(_prepare_attn_mask, model.transformer))
- setattr(model.transformer, '_build_alibi_tensor',
- MethodType(_build_alibi_tensor, model.transformer))
- setattr(model.transformer, 'forward',
- MethodType(transformer_forward, model.transformer))
-
- # In order to actually use the new argument we've added to
- # model.transformer, we need to update the parent module's `forward` to
- # accept/pass the same new argument.
- # We add 2 lines to handle that change.
- # Both lines are tagged with "# WE'RE ADDING A NEW ARGUMENT!"
- KeyValueT = Tuple[torch.Tensor, torch.Tensor]
-
- def forward(
- self: BloomForCausalLM,
- input_ids: Optional[torch.LongTensor] = None,
- past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
- attention_mask: Optional[torch.Tensor] = None,
- # WE'RE ADDING A NEW ARGUMENT! (Change 1/2)
- bidirectional_mask: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- inputs_embeds: Optional[torch.Tensor] = None,
- labels: Optional[torch.Tensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- **deprecated_arguments: Any,
- ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
- """Replacement forward method for BloomCausalLM."""
- if deprecated_arguments.pop('position_ids', False) is not False:
- # `position_ids` could have been `torch.Tensor` or `None` so
- # defaulting pop to `False` allows to detect if users were passing
- # explicitly `None`
- warnings.warn(
- '`position_ids` have no functionality in BLOOM and will be removed ' +\
- 'in v5.0.0. You can safely ignore passing `position_ids`.',
- FutureWarning,
- )
- if len(deprecated_arguments) > 0:
- raise ValueError(
- f'Got unexpected arguments: {deprecated_arguments}')
-
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- transformer_outputs = self.transformer(
- input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- # WE'RE ADDING A NEW ARGUMENT! (Change 2/2)
- bidirectional_mask=bidirectional_mask,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = transformer_outputs[0]
-
- lm_logits = self.lm_head(hidden_states)
-
- loss = None
- if labels is not None:
- # Shift so that tokens < n predict n
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
- batch_size, seq_length, vocab_size = shift_logits.shape
- # Flatten the tokens
- loss_fct = CrossEntropyLoss()
- loss = loss_fct(
- shift_logits.view(batch_size * seq_length, vocab_size),
- shift_labels.view(batch_size * seq_length))
-
- if not return_dict:
- output = (lm_logits,) + transformer_outputs[1:]
- return ((loss,) + output) if loss is not None else output
-
- return CausalLMOutputWithCrossAttentions(
- loss=loss,
- logits=lm_logits,
- past_key_values=transformer_outputs.past_key_values,
- hidden_states=transformer_outputs.hidden_states,
- attentions=transformer_outputs.attentions,
- )
-
- # To handle generation, re-write `prepare_inputs_for_generation` to
- # implement the bidirectional logic.
- def prepare_inputs_for_generation(self: BloomForCausalLM,
- input_ids: torch.LongTensor,
- past: Optional[torch.Tensor] = None,
- attention_mask: Optional[
- torch.Tensor] = None,
- **kwargs: Any) -> dict:
- del kwargs # unused
- # only last token for input_ids if past is not None
- if past:
- input_ids = input_ids[:, -1].unsqueeze(-1) # type: ignore
- # We can turn off bidirectional masking after the prefix
- # has been encoded into `past`
- bidirectional_mask = None
-
- # the cache may be in the standard format (e.g. in contrastive
- # search), convert to bloom's format if needed
- if past[0][0].shape[0] == input_ids.shape[0]:
- past = self._convert_to_bloom_cache(past)
-
- else:
- # If we're here, `input_ids` contains the prefix. Encode it with
- # bidirectional attention.
- bidirectional_mask = torch.ones_like(input_ids)
-
- return {
- 'input_ids': input_ids,
- 'past_key_values': past,
- # "use_cache": kwargs.get("use_cache"),
- # Requires this. TODO(Alex): Confirm this supports other decoding strategies.
- 'use_cache': True,
- 'attention_mask': attention_mask,
- 'bidirectional_mask': bidirectional_mask,
- }
-
- # Register the new `forward` and `prepare_inputs_for_generation` methods
- # with the model
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'prepare_inputs_for_generation',
- MethodType(prepare_inputs_for_generation, model))
-
- # Finally, tag the model so that this conversion cannot happen again.
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-
-def _convert_opt_causal_lm_to_prefix_lm(
- model: OPTForCausalLM) -> OPTForCausalLM:
- """Converts an OPT Causal LM to a Prefix LM.
-
- Supported HuggingFace model classes:
- - `OPTForCausalLM`
-
- See `convert_hf_causal_lm_to_prefix_lm` for more details.
- """
- if hasattr(model, '_prefix_lm_converted'):
- return model
-
- assert isinstance(model, OPTForCausalLM)
- assert model.config.add_cross_attention == False, 'Only supports OPT decoder-only models'
-
- # Rename methods to allow:
- # - new `forward` to wrap original `forward`
- # - new `generate` to wrap original `generate`
- setattr(model, '_original_forward', getattr(model, 'forward'))
- setattr(model, '_original_generate', getattr(model, 'generate'))
-
- model.model.decoder.bidirectional_mask = None
-
- # Modified from transformers.models.bloom.modeling_opt.OPTDecoder._prepare_decoder_attn_mask
- # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/opt/modeling_opt.py#L532
- def _prepare_decoder_attention_mask(self: torch.nn.Module,
- attention_mask: Optional[torch.Tensor],
- input_shape: Tuple[int, int],
- inputs_embeds: Optional[torch.Tensor],
- past_key_values_length: int):
- # create causal mask
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- combined_attention_mask = None
- if input_shape[-1] > 1:
- assert inputs_embeds is not None
- # 'g' indicates generation mode. Causal mask replaced with 0.
- if self.bidirectional_mask == 'g':
- bsz, src_length = input_shape
- combined_attention_mask = torch.zeros(
- (bsz, 1, src_length, src_length + past_key_values_length),
- dtype=inputs_embeds.dtype,
- device=inputs_embeds.device)
- else:
- combined_attention_mask = _make_causal_mask_opt(
- input_shape,
- inputs_embeds.dtype,
- past_key_values_length=past_key_values_length).to(
- inputs_embeds.device)
-
- # Make use of the batch-specific `bidirectional_mask` attribute
- # set by the parent module in its (new) `forward` method wrapper
- if self.bidirectional_mask is not None:
- assert attention_mask is not None
- # The two masks should have the same size
- assert attention_mask.shape == self.bidirectional_mask.shape
-
- # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length]
- expanded_bidirectional_mask = _expand_mask_opt(
- self.bidirectional_mask,
- inputs_embeds.dtype,
- tgt_len=input_shape[-1]).to(inputs_embeds.device)
- combined_attention_mask = torch.maximum(
- expanded_bidirectional_mask, combined_attention_mask)
-
- if attention_mask is not None:
- assert inputs_embeds is not None
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- expanded_attn_mask = _expand_mask_opt(attention_mask,
- inputs_embeds.dtype,
- tgt_len=input_shape[-1]).to(
- inputs_embeds.device)
- combined_attention_mask = (expanded_attn_mask
- if combined_attention_mask is None else
- expanded_attn_mask +
- combined_attention_mask)
-
- return combined_attention_mask
-
- # Make it so model.model.decoder uses the above `_prepare_decoder_attn_mask`
- # in place of the original method
- setattr(model.model.decoder, '_prepare_decoder_attention_mask',
- MethodType(_prepare_decoder_attention_mask, model.model.decoder))
-
- def forward(
- self: OPTForCausalLM,
- input_ids: Optional[torch.LongTensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- bidirectional_mask: Optional[torch.ByteTensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- inputs_embeds: Optional[torch.FloatTensor] = None,
- labels: Optional[torch.LongTensor] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ):
-
- def call_og_forward():
- return self._original_forward(
- input_ids=input_ids,
- attention_mask=attention_mask,
- head_mask=head_mask,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- labels=labels,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
-
- if bidirectional_mask is None:
- # This wrapper is a no-op if bidirectional masks are not supplied
- return call_og_forward()
-
- # Temporarily set `bidirectional_mask` in the child module
- self.model.decoder.bidirectional_mask = bidirectional_mask
-
- # Apply the original forward method (the model will use the mask that
- # was just set)
- try:
- outputs = call_og_forward()
- except:
- self.model.decoder.bidirectional_mask = None
- raise
-
- # Reset the `bidirectional_mask` attribute to None
- self.model.decoder.bidirectional_mask = None
-
- # Return the outputs
- return outputs
-
- def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Any):
- """Wraps original generate to enable PrefixLM-style attention."""
- # Flag the child module to use generation-style attention masking
- self.model.decoder.bidirectional_mask = 'g'
-
- # Collect outputs using the model's original forward method
- try:
- output = self._original_generate(*args, **kwargs)
- except:
- self.model.decoder.bidirectional_mask = None
- raise
-
- # Reset the `bidirectional_mask` attribute to None
- self.model.decoder.bidirectional_mask = None
-
- # Return the output
- return output
-
- # Replace `forward` and `generate` with the new wrappers
- setattr(model, 'forward', MethodType(forward, model))
- setattr(model, 'generate', MethodType(generate, model))
-
- # Finally, tag the model so that this conversion cannot happen again.
- setattr(model, '_prefix_lm_converted', True)
- return model
-
-
-_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM,
- OPTForCausalLM)
+_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS
CAUSAL_LM_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM,
- GPTNeoXForCausalLM, BloomForCausalLM, OPTForCausalLM]
+ GPTNeoXForCausalLM]
def convert_hf_causal_lm_to_prefix_lm(
@@ -811,8 +221,6 @@ def convert_hf_causal_lm_to_prefix_lm(
- `GPTNeoForCausalLM`
- `GPTNeoXForCausalLM`
- `GPTJForCausalLM`
- - `BloomForCausalLM`
- - `OPTForCausalLM`
Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the
`generate` method and/or select underlying methods depending on the model class.
@@ -862,13 +270,6 @@ def convert_hf_causal_lm_to_prefix_lm(
"""
if isinstance(model, _SUPPORTED_GPT_MODELS):
return _convert_gpt_causal_lm_to_prefix_lm(model)
-
- elif isinstance(model, BloomForCausalLM):
- return _convert_bloom_causal_lm_to_prefix_lm(model)
-
- elif isinstance(model, OPTForCausalLM):
- return _convert_opt_causal_lm_to_prefix_lm(model)
-
else:
raise TypeError(
f'Cannot convert model to Prefix LM. ' +\
| diff --git a/tests/test_model.py b/tests/test_model.py
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -25,8 +25,7 @@
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.bloom.modeling_bloom import build_alibi_tensor
-from llmfoundry import (COMPOSER_MODEL_REGISTRY, ComposerHFCausalLM,
- ComposerHFPrefixLM)
+from llmfoundry import COMPOSER_MODEL_REGISTRY, ComposerHFCausalLM
from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithZLoss
from llmfoundry.models.layers import NORM_CLASS_REGISTRY, build_alibi_bias
from llmfoundry.models.layers.blocks import MPTBlock
@@ -438,11 +437,10 @@ def test_loss_fn():
atol=1e-4), f'differed at step {i}'
[email protected]('prefixlm', [False, True])
-def test_opt_wrapping(prefixlm: bool):
+def test_opt_wrapping():
conf = {
'model': {
- 'name': 'hf_prefix_lm' if prefixlm else 'hf_causal_lm',
+ 'name': 'hf_causal_lm',
'pretrained_model_name_or_path': 'facebook/opt-125m',
'pretrained': 'false'
},
@@ -456,10 +454,7 @@ def test_opt_wrapping(prefixlm: bool):
tokenizer = build_tokenizer(config.tokenizer.name,
tokenizer_cfg.get('kwargs', {}))
- if prefixlm:
- model = ComposerHFPrefixLM(config.model, tokenizer)
- else:
- model = ComposerHFCausalLM(config.model, tokenizer)
+ model = ComposerHFCausalLM(config.model, tokenizer)
# check that all the modules we except are blocked from FSDP wrapping
assert not model.model.model._fsdp_wrap
| MPT models on the Hub not working with `transformers` main
Hi there!
Currently with transformers main loading MPT models from the Hub fails because it tries to import some private method (such as `_expand_mask` ) that has been recently removed: https://github.com/huggingface/transformers/pull/27086
The simple loading script below should work
```python
from accelerate import init_empty_weights
from transformers import AutoModelForCausalLM, AutoConfig
model_id = "mosaicml/mpt-7b"
config = AutoConfig.from_pretrained(
model_id, trust_remote_code=True
)
with init_empty_weights():
model = AutoModelForCausalLM.from_config(
config, trust_remote_code=True
)
```
| Thanks for letting us know Younes, will look into this ASAP
Thanks @dakinggg !
| 2023-10-30T17:55:10 |
sanic-org/sanic | 186 | sanic-org__sanic-186 | [
"116"
] | 3ea1a804963cc17e9e0ac6469ed58a97a3a3d73c | diff --git a/sanic/request.py b/sanic/request.py
--- a/sanic/request.py
+++ b/sanic/request.py
@@ -4,6 +4,7 @@
from httptools import parse_url
from urllib.parse import parse_qs
from ujson import loads as json_loads
+from sanic.exceptions import InvalidUsage
from .log import log
@@ -67,7 +68,7 @@ def json(self):
try:
self.parsed_json = json_loads(self.body)
except Exception:
- log.exception("Failed when parsing body as json")
+ raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
| diff --git a/tests/test_requests.py b/tests/test_requests.py
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -49,6 +49,19 @@ async def handler(request):
assert results.get('test') == True
+def test_invalid_json():
+ app = Sanic('test_json')
+
+ @app.route('/')
+ async def handler(request):
+ return json(request.json())
+
+ data = "I am not json"
+ request, response = sanic_endpoint_test(app, data=data)
+
+ assert response.status == 400
+
+
def test_query_string():
app = Sanic('test_query_string')
| Unparseable JSON should not be ignored
request.py, line 64
Returning a None here causes breakage in application code further down the line. Generate a 400 error here on malformed JSON to protect the server.
| Should it just raise an Exception that then generates a 400 error?
Yes, that would be my preference.
The actual line is here now: https://github.com/channelcat/sanic/blob/master/sanic/request.py#L69
Correct, that line should not log anything (as I don't want my logs littered with client-side problems), and should raise an InvalidUsage exception.
@jasonab I am not able to reproduce this, am I misunderstanding the bug? I added this test and although it fails, there is a 500 internal server error (not the silent, ignoring result that you're referring to):
```python
def test_post_invalid_json():
app = Sanic('test_post_json')
@app.route('/')
async def handler(request):
data = request.json()
return json(data)
payload = None
headers = {'content-type': 'application/json'}
request, response = sanic_endpoint_test(app,
data=json_dumps(payload),
headers=headers)
assert response.status == '400'
```
You're saying Sanic now throws a 500 error on malformed JSON? That doesn't sound right.
The initial issue was that if JSON parsing fails and throws an exception, Sanic catches that exception and returns None from the json() method. Any code of mine that assumes that we have a non-None value from json() breaks.
What stack trace are you now seeing?
Ah, I was misunderstanding then. That test is malformed post data. I have observed the issue you're referring to as well.
I don't think that request.py actually has anything to do with this, since we're talking about invalid json in the response. This looks like it's a result of the `dump` function in `ujson`:
```python
>>> import ujson
>>> ujson.dumps(None)
'null'
>>> ujson.dumps("I am not valid json")
'"i am not valid json"'
```
See:
https://github.com/channelcat/sanic/blob/master/sanic/response.py#L5
and:
https://github.com/channelcat/sanic/blob/master/sanic/response.py#L135
The line of code I cited (now at request.py line 69) is for incoming data, not response data. I'm sorry if there's been confusion.
To restate the problem: when you call request.json(), if the posted JSON data causes the parser to error out, Sanic returns a None to the controller/resource. This None will tend to blow up any code operating on that returned value.
What I believe should happen is that Sanic should raise an exception and return a 400-level error if malformed JSON is sent with the request. | 2016-12-08T04:36:50 |
sanic-org/sanic | 326 | sanic-org__sanic-326 | [
"324"
] | bef34d66f5afd14d82f8a66d8849cdae7f71f45b | diff --git a/examples/exception_monitoring.py b/examples/exception_monitoring.py
--- a/examples/exception_monitoring.py
+++ b/examples/exception_monitoring.py
@@ -9,17 +9,15 @@
class' default handler, we can do anything including sending exceptions to
an external service.
"""
-
-
-
+from sanic.exceptions import Handler, SanicException
"""
Imports and code relevant for our CustomHandler class
(Ordinarily this would be in a separate file)
"""
-from sanic.response import text
-from sanic.exceptions import Handler, SanicException
+
class CustomHandler(Handler):
+
def default(self, request, exception):
# Here, we have access to the exception object
# and can do anything with it (log, send to external service, etc)
@@ -31,9 +29,7 @@ def default(self, request, exception):
# Then, we must finish handling the exception by returning
# our response to the client
# For this we can just call the super class' default handler
- return super.default(self, request, exception)
-
-
+ return super().default(request, exception)
"""
@@ -49,11 +45,12 @@ def default(self, request, exception):
handler = CustomHandler(sanic=app)
app.error_handler = handler
+
@app.route("/")
async def test(request):
# Here, something occurs which causes an unexpected exception
# This exception will flow to our custom handler.
- x = 1 / 0
+ 1 / 0
return json({"test": True})
| During handling of the above exception, another exception occurred
In the examples/exception_monitoring.py, when i try to run the app and hit the default route, it prints out the exception and then it calls "return super.default(self, request, exception)", it returns with the following exception:
AttributeError: type object 'super' has no attribute 'default'
looks like in the exceptions.py, this line is the culprit (i could be wrong):
`handler = self.handlers.get(type(exception), self.default)
`
since __init__ does not have that attribute defined. I am running python 3.6
| Oh that must be a typo, I think it should actually be `super().default(self, request, exception)`
i think it should be
`return super().default(request, exception)`
without the "self" | 2017-01-20T20:37:53 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.