Dataset fields: desc (string, length 3 to 26.7k), decl (string, length 11 to 7.89k), bodies (string, length 8 to 553k).
'Search accounts for a given domain. Environment is added by this function. Use this instead of search() to benefit from environment filtering. Use user.has_group() and suspend_security() before calling this method.'
@api.model
def retrieve(self, domain):
    domain.append(['environment', 'in', self._retrieve_env()])
    return self.search(domain)
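A hedged usage sketch of retrieve(), as it might be run from an Odoo shell: the caller filters only on its own fields and lets the method append the environment clause. The namespace value is an assumption for illustration.

# Illustrative only: 'env' is an Odoo shell environment and the
# namespace value 'import_backend' is assumed, not taken from the module.
accounts = env['keychain.account'].retrieve([
    ('namespace', '=', 'import_backend'),
])
for account in accounts:
    print(account.get_password())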
'At this time there is no namespace set.'
@api.multi
def write(self, vals):
    if not vals.get('data') and not self.data:
        vals['data'] = self._serialize_data(self._init_data())
    return super(KeychainAccount, self).write(vals)
'Return the current environments. You may override this function to fit your needs. Returns a tuple like (\'dev\', \'test\', False), which means accounts for dev, test and blank (not set). Order is important: the first one is used for encryption.'
@staticmethod
def _retrieve_env():
    current = config.get('running_env') or False
    envs = [current]
    if False not in envs:
        envs.append(False)
    return envs
'Return a cipher using the keys of environments. force_env = name of the env key. Useful for encoding against one precise env'
@classmethod
def _get_cipher(cls, force_env=None):
    def _get_keys(envs):
        suffixes = [('_%s' % env) if env else '' for env in envs]
        keys_name = ['keychain_key%s' % suf for suf in suffixes]
        keys_str = [config.get(key) for key in keys_name]
        return [Fernet(key) for key in keys_str if key and len(key) > 0]
    if force_env:
        envs = [force_env]
    else:
        envs = cls._retrieve_env()
    keys = _get_keys(envs)
    if len(keys) == 0:
        raise Warning(_(
            "No 'keychain_key_%s' entries found in config file. Use a key similar to: %s" % (envs[0], Fernet.generate_key())))
    return MultiFernet(keys)
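Since _get_cipher() looks up keys named keychain_key plus an optional _<env> suffix, a minimal sketch (assuming the standard cryptography package) for generating matching odoo.conf entries could look like this; the environment names are assumptions.

from cryptography.fernet import Fernet

# Print one config entry per environment; None stands for the blank env.
for env_name in (None, 'dev', 'prod'):
    suffix = '_%s' % env_name if env_name else ''
    print('keychain_key%s = %s' % (suffix, Fernet.generate_key().decode()))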
'It should encrypt passwords.'
def test_password(self):
    account = self._create_account()
    passwords = ('', '12345', 'djkqfljfqm', u'&\xe9"\'(\xa7\xe8!\xe7')
    for password in passwords:
        account.clear_password = password
        account._inverse_set_password()
        self.assertTrue(account.clear_password != account.password)
        self.assertEqual(account.get_password(), password)
'It should raise an exception when encoded key != decoded.'
def test_wrong_key(self):
    account = self._create_account()
    password = 'urieapocq'
    account.clear_password = password
    account._inverse_set_password()
    config['keychain_key'] = Fernet.generate_key()
    try:
        account.get_password()
        self.assertTrue(False, 'It should not work with another key')
    except Warning as err:
        self.assertTrue(True, 'It should raise a Warning')
        self.assertTrue('has been encrypted with a diff' in str(err),
                        'It should display the right msg')
    else:
        self.assertTrue(False, 'It should raise a Warning')
'It should raise an exception when no key is set.'
def test_no_key(self):
    account = self._create_account()
    del config.options['keychain_key']
    with self.assertRaises(Warning) as err:
        account.clear_password = 'aiuepr'
        account._inverse_set_password()
        self.assertTrue(False, 'It should not work without key')
    self.assertTrue('Use a key similar to' in str(err.exception),
                    'It should display the right msg')
'It should raise an exception when key is not acceptable format.'
def test_badly_formatted_key(self):
    account = self._create_account()
    config['keychain_key'] = ''
    with self.assertRaises(Warning):
        account.clear_password = 'aiuepr'
        account._inverse_set_password()
        self.assertTrue(False, 'It should not work with a badly formatted key')
    self.assertTrue(True, 'It should raise a ValueError')
'Retrieve env should always return False at the end'
def test_retrieve_env(self):
    config['running_env'] = False
    self.assertListEqual(self.keychain._retrieve_env(), [False])
    config['running_env'] = 'dev'
    self.assertListEqual(self.keychain._retrieve_env(), ['dev', False])
    config['running_env'] = 'prod'
    self.assertListEqual(self.keychain._retrieve_env(), ['prod', False])
'Encrypt with dev, decrypt with dev.'
def test_multienv(self):
    account = self._create_account()
    config['keychain_key_dev'] = Fernet.generate_key()
    config['keychain_key_prod'] = Fernet.generate_key()
    config['running_env'] = 'dev'
    account.clear_password = 'abc'
    account._inverse_set_password()
    self.assertEqual(account.get_password(), 'abc', 'Should work with dev')
    config['running_env'] = 'prod'
    with self.assertRaises(Warning):
        self.assertEqual(account.get_password(), 'abc',
                         'Should not work with prod key')
'Encrypt with blank, decrypt for all.'
def test_multienv_blank(self):
    account = self._create_account()
    config['keychain_key'] = Fernet.generate_key()
    config['keychain_key_dev'] = Fernet.generate_key()
    config['keychain_key_prod'] = Fernet.generate_key()
    config['running_env'] = ''
    account.clear_password = 'abc'
    account._inverse_set_password()
    self.assertEqual(account.get_password(), 'abc', 'Should work with dev')
    config['running_env'] = 'prod'
    self.assertEqual(account.get_password(), 'abc', 'Should work with prod')
'Set the env on the record'
def test_multienv_force(self):
    account = self._create_account()
    account.environment = 'prod'
    config['keychain_key'] = Fernet.generate_key()
    config['keychain_key_dev'] = Fernet.generate_key()
    config['keychain_key_prod'] = Fernet.generate_key()
    config['running_env'] = ''
    account.clear_password = 'abc'
    account._inverse_set_password()
    with self.assertRaises(Warning):
        self.assertEqual(account.get_password(), 'abc',
                         'Should not work with dev')
    config['running_env'] = 'prod'
    self.assertEqual(account.get_password(), 'abc', 'Should work with prod')
'It should raise an exception when data is not valid json.'
def test_wrong_json(self):
    account = self._create_account()
    wrong_jsons = ("{'hi':'o'}", "{'oq", '[>}')
    for json in wrong_jsons:
        with self.assertRaises(ValidationError) as err:
            account.write({'data': json})
            self.assertTrue(False, 'Should not validate badly formatted json')
        self.assertTrue('Data should be a valid JSON' in str(err.exception),
                        'It should raise a ValidationError')
'It should raise an exception when data doesn\'t pass _validate_data.'
def test_invalid_json(self):
    account = self._create_account()
    invalid_jsons = ('{}', '{"hi": 1}')
    for json in invalid_jsons:
        with self.assertRaises(ValidationError) as err:
            account.write({'data': json})
        self.assertTrue('Data not valid' in str(err.exception),
                        'It should raise a ValidationError')
'It should work with valid data.'
def test_valid_json(self):
    account = self._create_account()
    valid_jsons = ('{"c": true}', '{"c": 1}', '{"a": "o", "c": "b"}')
    for json in valid_jsons:
        try:
            account.write({'data': json})
            self.assertTrue(True, 'Should validate json')
        except:
            self.assertTrue(False, 'It should validate a good json')
'Get the main image for this object. This is provided as a compatibility layer for submodels that already had one image per record.'
@api.multi
@api.depends('image_ids')
def _get_multi_image(self):
    for s in self:
        first = s.image_ids[:1]
        s.image_main = first.image_main
        s.image_main_medium = first.image_medium
        s.image_main_small = first.image_small
'Save or delete the main image for this record. This is provided as a compatibility layer for submodels that already had one image per record.'
@api.multi
def _set_multi_image(self, image=False, name=False):
    values = {'storage': 'db',
              'file_db_store': tools.image_resize_image_big(image),
              'owner_model': self._name}
    if name:
        values['name'] = name
    for s in self:
        if image:
            values['owner_id'] = s.id
            if s.image_ids:
                s.image_ids[0].write(values)
            else:
                values.setdefault('name', name or _('Main image'))
                s.image_ids = [(0, 0, values)]
        elif s.image_ids:
            s.image_ids[0].unlink()
'Mimic `ondelete="cascade"` for multi images. Will be skipped if ``env.context[\'bypass_image_removal\']`` == True'
@api.multi
def unlink(self):
    images = self.mapped('image_ids')
    result = super(Owner, self).unlink()
    if result and not self.env.context.get('bypass_image_removal'):
        images.unlink()
    return result
'Allow any model; after all, this field is readonly.'
@api.model
@tools.ormcache('self')
def _selection_owner_ref_id(self):
    return [(r.model, r.name) for r in self.env['ir.model'].search([])]
'Get a reference field based on the split model and id fields.'
@api.multi
@api.depends('owner_model', 'owner_id')
def _compute_owner_ref_id(self):
    for s in self:
        s.owner_ref_id = '{0.owner_model},{0.owner_id}'.format(s)
'Get image data from the right storage type.'
@api.multi
@api.depends('storage', 'path', 'file_db_store', 'url')
def _get_image(self):
    for s in self:
        s.image_main = getattr(s, '_get_image_from_%s' % s.storage)()
'Know if you need to show the technical fields.'
@api.multi
@api.depends('owner_id', 'owner_model')
def _show_technical(self):
    self.show_technical = all(
        ('default_owner_%s' % f) not in self.env.context
        for f in ('id', 'model'))
'Allow to download an image and cache it by its URL.'
@api.model
@tools.ormcache('url')
def _get_image_from_url_cached(self, url):
    if url:
        try:
            (filename, header) = urllib.urlretrieve(url)
            with open(filename, 'rb') as f:
                return base64.b64encode(f.read())
        except:
            _logger.error('URL %s cannot be fetched', url, exc_info=True)
    return False
'This function is used to test and debug this module'
@api.model
def _test_scheduler_failure(self):
    raise UserError(_('Task failure with UID = %d.') % self._uid)
'Without this call, dynamic fields built by fields_view_get() generate log warnings such as: odoo.models:mass.editing.wizard.read() with unknown field \'myfield\' odoo.models:mass.editing.wizard.read() with unknown field \'selection__myfield\''
def read(self, fields, load='_classic_read'):
    real_fields = fields
    if fields:
        real_fields = [x for x in fields if x in self._fields]
    return super(MassEditingWizard, self).read(real_fields, load=load)
'Create a Partner.'
def _create_partner(self):
    categ_ids = self.env['res.partner.category'].search([]).ids
    return self.res_partner_model.create({
        'name': 'Test Partner',
        'email': '[email protected]',
        'phone': 123456,
        'category_id': [(6, 0, categ_ids)],
    })
'Create a Mass Editing with Partner as model and email field of partner.'
def _create_mass_editing(self, model, fields):
    mass = self.mass_object_model.create({
        'name': 'Mass Editing for Partner',
        'model_id': model.id,
        'field_ids': [(6, 0, fields.ids)],
    })
    mass.create_action()
    return mass
'Create Wizard object to perform mass editing to REMOVE field\'s value.'
def _apply_action(self, partner, vals):
    ctx = {
        'active_id': partner.id,
        'active_ids': partner.ids,
        'active_model': 'res.partner',
    }
    return self.mass_wiz_obj.with_context(ctx).create(vals)
'Test whether fields_view_get method returns arch or not.'
def test_wiz_fields_view_get(self):
    ctx = {
        'mass_editing_object': self.mass.id,
        'active_id': self.partner.id,
        'active_ids': self.partner.ids,
        'active_model': 'res.partner',
    }
    result = self.mass_wiz_obj.with_context(ctx).fields_view_get()
    self.assertTrue(result.get('arch'),
                    'Fields view get must return architecture.')
'Test whether onchange model_id returns model_id in list'
def test_onchange_model(self):
    new_mass = self.mass_object_model.new({'model_id': self.user_model.id})
    new_mass._onchange_model_id()
    model_list = ast.literal_eval(new_mass.model_list)
    self.assertTrue(self.user_model.id in model_list,
                    'Onchange model list must contain model_id.')
'Test case for MASS EDITING which will remove and then re-add the Partner\'s email and assert the result.'
def test_mass_edit_email(self):
    vals = {'selection__email': 'remove', 'selection__phone': 'remove'}
    self._apply_action(self.partner, vals)
    self.assertEqual(self.partner.email, False,
                     "Partner's Email should be removed.")
    vals = {'selection__email': 'set', 'email': '[email protected]'}
    self._apply_action(self.partner, vals)
    self.assertNotEqual(self.partner.email, False,
                        "Partner's Email should be set.")
'Test Case for MASS EDITING which will remove and add Partner\'s category m2m.'
def test_mass_edit_m2m_categ(self):
    vals = {'selection__category_id': 'remove_m2m'}
    self._apply_action(self.partner, vals)
    self.assertNotEqual(self.partner.category_id, False,
                        "Partner's category should be removed.")
    dist_categ_id = self.env.ref('base.res_partner_category_13').id
    vals = {'selection__category_id': 'add',
            'category_id': [[6, 0, [dist_categ_id]]]}
    wiz_action = self._apply_action(self.partner, vals)
    self.assertTrue(dist_categ_id in self.partner.category_id.ids,
                    "Partner's category should be added.")
    res = wiz_action.action_apply()
    self.assertTrue(res['type'] == 'ir.actions.act_window_close',
                    'IR Action must be window close.')
'Test that the field_ids one2many field is blank when a mass editing record is copied.'
def test_mass_edit_copy(self):
self.assertEqual(self.copy_mass.field_ids.ids, [], 'Fields must be blank.')
'Test if the sidebar action is added to / removed from the given object.'
def test_sidebar_action(self):
    action = self.mass.ref_ir_act_window_id and self.mass.ref_ir_value_id
    self.assertTrue(action, 'Sidebar action must exist.')
    self.mass.unlink_action()
    action = self.mass.ref_ir_act_window_id and self.mass.ref_ir_value_id
    self.assertFalse(action, 'Sidebar action must be removed.')
'Test if related actions are removed when mass editing record is unlinked.'
def test_unlink_mass(self):
    mass_action_id = 'ir.actions.act_window,' + str(self.mass.id)
    self.mass.unlink()
    value_cnt = self.env['ir.values'].search(
        [('value', '=', mass_action_id)], count=True)
    self.assertTrue(value_cnt == 0,
                    'Sidebar action must be removed when mass editing is unlinked.')
'Test if related actions are removed when mass editing record is uninstalled.'
def test_uninstall_hook(self):
    uninstall_hook(self.cr, registry)
    mass_action_id = 'ir.actions.act_window,' + str(self.mass.id)
    value_cnt = self.env['ir.values'].search(
        [('value', '=', mass_action_id)], count=True)
    self.assertTrue(value_cnt == 0,
                    'Sidebar action must be removed when mass editing module is uninstalled.')
'It provides validations before initiating a pass reset email :raises: PassError on invalidated pass reset attempt :return: True on allowed reset'
@api.multi
def _validate_pass_reset(self):
    for rec_id in self:
        pass_min = rec_id.company_id.password_minimum
        if pass_min <= 0:
            pass
        write_date = fields.Datetime.from_string(rec_id.password_write_date)
        delta = timedelta(hours=pass_min)
        if (write_date + delta) > datetime.now():
            raise PassError(
                _('Passwords can only be reset every %d hour(s). Please contact an administrator for assistance.') % pass_min)
    return True
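A worked example of the threshold check above, using assumed values: with password_minimum = 24, a reset attempted two hours after the last password write still falls inside the window, so PassError would be raised.

from datetime import datetime, timedelta

write_date = datetime(2017, 1, 1, 12, 0)   # assumed last password write
delta = timedelta(hours=24)                # company_id.password_minimum = 24
now = datetime(2017, 1, 1, 14, 0)          # reset attempted 2 hours later
print(write_date + delta > now)            # True -> PassError would be raised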
'It validates proposed password against existing history :raises: PassError on reused password'
@api.multi
def _check_password_history(self, password):
    crypt = self._crypt_context()
    for rec_id in self:
        recent_passes = rec_id.company_id.password_history
        if recent_passes < 0:
            recent_passes = rec_id.password_history_ids
        else:
            recent_passes = rec_id.password_history_ids[0:recent_passes - 1]
        if recent_passes.filtered(
                lambda r: crypt.verify(password, r.password_crypt)):
            raise PassError(
                _('Cannot use the most recent %d passwords') %
                rec_id.company_id.password_history)
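The verify call above relies on the passlib CryptContext returned by Odoo's _crypt_context(). A minimal standalone sketch of that reuse check, with an assumed scheme list (Odoo's real context may differ), might be:

from passlib.context import CryptContext

# Assumption: a single pbkdf2_sha512 scheme, for illustration only.
crypt = CryptContext(schemes=['pbkdf2_sha512'])
stored_hash = crypt.hash('old password')
print(crypt.verify('old password', stored_hash))  # True -> reuse detected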
'It saves password crypt history for history rules'
@api.multi
def _set_encrypted_password(self, encrypted):
    super(ResUsers, self)._set_encrypted_password(encrypted)
    self.write({
        'password_history_ids': [(0, 0, {'password_crypt': encrypted})],
    })
'It should raise PassError if previously used'
def test_check_password_crypt(self):
    rec_id = self._new_record()
    with self.assertRaises(PassError):
        rec_id.write({'password': self.password})
'It should throw PassError on reset inside min threshold'
def test_validate_pass_reset_error(self):
    rec_id = self._new_record()
    with self.assertRaises(PassError):
        rec_id._validate_pass_reset()
'It should allow reset pass when outside threshold'
def test_validate_pass_reset_allow(self):
    rec_id = self._new_record()
    rec_id.password_write_date = '2016-01-01'
    self.assertEqual(True, rec_id._validate_pass_reset())
'It should allow reset pass when <= 0'
def test_validate_pass_reset_zero(self):
    rec_id = self._new_record()
    rec_id.company_id.password_minimum = 0
    self.assertEqual(True, rec_id._validate_pass_reset())
'It mocks and returns assets used by this controller'
@contextmanager
def mock_assets(self):
    with mock.patch('%s.request' % IMPORT) as request:
        yield {'request': request}
'It should check password on request user'
def test_change_password_check(self):
    with self.mock_assets() as assets:
        check_password = assets['request'].env.user._check_password
        check_password.side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_session.change_password(self.fields)
        check_password.assert_called_once_with(self.passwd)
'It should return result of super'
def test_change_password_return(self):
    with self.mock_assets():
        with mock.patch.object(main.Session, 'change_password') as chg:
            res = self.password_security_session.change_password(self.fields)
            self.assertEqual(chg(), res)
'It mocks and returns assets used by this controller'
@contextmanager
def mock_assets(self):
    methods = ['do_signup', 'web_login', 'web_auth_signup',
               'web_auth_reset_password']
    with mock.patch.multiple(
            main.AuthSignupHome,
            **{m: mock.DEFAULT for m in methods}) as _super:
        mocks = {}
        for method in methods:
            mocks[method] = _super[method]
            mocks[method].return_value = MockResponse()
        with mock.patch('%s.request' % IMPORT) as request:
            with mock.patch('%s.ensure_db' % IMPORT) as ensure:
                with mock.patch('%s.http' % IMPORT) as http:
                    http.redirect_with_hash.return_value = MockResponse()
                    mocks.update({
                        'request': request,
                        'ensure_db': ensure,
                        'http': http,
                    })
                    yield mocks
'It should check password on user'
def test_do_signup_check(self):
    with self.mock_assets() as assets:
        check_password = assets['request'].env.user._check_password
        check_password.side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_home.do_signup(self.qcontext)
        check_password.assert_called_once_with(self.passwd)
'It should return result of super'
def test_do_signup_return(self):
    with self.mock_assets() as assets:
        res = self.password_security_home.do_signup(self.qcontext)
        self.assertEqual(assets['do_signup'](), res)
'It should verify available db'
def test_web_login_ensure_db(self):
    with self.mock_assets() as assets:
        assets['ensure_db'].side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_home.web_login()
'It should call superclass w/ proper args'
def test_web_login_super(self):
    expect_list = [1, 2, 3]
    expect_dict = {'test1': 'good1', 'test2': 'good2'}
    with self.mock_assets() as assets:
        assets['web_login'].side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_home.web_login(*expect_list, **expect_dict)
        assets['web_login'].assert_called_once_with(*expect_list, **expect_dict)
'It should return immediate result of super when not POST'
def test_web_login_no_post(self):
    with self.mock_assets() as assets:
        assets['request'].httprequest.method = 'GET'
        assets['request'].session.authenticate.side_effect = EndTestException
        res = self.password_security_home.web_login()
        self.assertEqual(assets['web_login'](), res)
'It should attempt authentication to obtain uid'
def test_web_login_authenticate(self):
    with self.mock_assets() as assets:
        assets['request'].httprequest.method = 'POST'
        authenticate = assets['request'].session.authenticate
        request = assets['request']
        authenticate.side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_home.web_login()
        authenticate.assert_called_once_with(
            request.session.db,
            request.params['login'],
            request.params['password'],
        )
'It should return super result if failed auth'
def test_web_login_authenticate_fail(self):
    with self.mock_assets() as assets:
        authenticate = assets['request'].session.authenticate
        request = assets['request']
        request.httprequest.method = 'POST'
        request.env['res.users'].sudo.side_effect = EndTestException
        authenticate.return_value = False
        res = self.password_security_home.web_login()
        self.assertEqual(assets['web_login'](), res)
'It should get the proper user as sudo'
def test_web_login_get_user(self):
    with self.mock_assets() as assets:
        request = assets['request']
        request.httprequest.method = 'POST'
        sudo = request.env['res.users'].sudo()
        sudo.browse.side_effect = EndTestException
        with self.assertRaises(EndTestException):
            self.password_security_home.web_login()
        sudo.browse.assert_called_once_with(request.uid)
'It should return parent result if pass isn\'t expired'
def test_web_login_valid_pass(self):
    with self.mock_assets() as assets:
        request = assets['request']
        request.httprequest.method = 'POST'
        user = request.env['res.users'].sudo().browse()
        user.action_expire_password.side_effect = EndTestException
        user._password_has_expired.return_value = False
        res = self.password_security_home.web_login()
        self.assertEqual(assets['web_login'](), res)
'It should expire password if necessary'
def test_web_login_expire_pass(self):
    with self.mock_assets() as assets:
        request = assets['request']
        request.httprequest.method = 'POST'
        user = request.env['res.users'].sudo().browse()
        user.action_expire_password.side_effect = EndTestException
        user._password_has_expired.return_value = True
        with self.assertRaises(EndTestException):
            self.password_security_home.web_login()
'It should redirect w/ hash to reset after expiration'
def test_web_login_redirect(self):
    with self.mock_assets() as assets:
        request = assets['request']
        request.httprequest.method = 'POST'
        user = request.env['res.users'].sudo().browse()
        user._password_has_expired.return_value = True
        res = self.password_security_home.web_login()
        self.assertEqual(assets['http'].redirect_with_hash(), res)
'It should return super if no errors'
def test_web_auth_signup_valid(self):
    with self.mock_assets() as assets:
        res = self.password_security_home.web_auth_signup()
        self.assertEqual(assets['web_auth_signup'](), res)
'It should catch PassError and get signup qcontext'
def test_web_auth_signup_invalid_qcontext(self):
    with self.mock_assets() as assets:
        with mock.patch.object(
                main.AuthSignupHome, 'get_auth_signup_qcontext') as qcontext:
            assets['web_auth_signup'].side_effect = MockPassError
            qcontext.side_effect = EndTestException
            with self.assertRaises(EndTestException):
                self.password_security_home.web_auth_signup()
'It should render & return signup form on invalid'
def test_web_auth_signup_invalid_render(self):
    with self.mock_assets() as assets:
        with mock.patch.object(
                main.AuthSignupHome, 'get_auth_signup_qcontext',
                spec=dict) as qcontext:
            assets['web_auth_signup'].side_effect = MockPassError
            res = self.password_security_home.web_auth_signup()
            assets['request'].render.assert_called_once_with(
                'auth_signup.signup', qcontext())
            self.assertEqual(assets['request'].render(), res)
'It should raise from failed _validate_pass_reset by login'
def test_web_auth_reset_password_fail_login(self):
    with self.mock_assets() as assets:
        with mock.patch.object(
                main.AuthSignupHome, 'get_auth_signup_qcontext',
                spec=dict) as qcontext:
            qcontext['login'] = 'login'
            search = assets['request'].env.sudo().search
            assets['request'].httprequest.method = 'POST'
            user = mock.MagicMock()
            user._validate_pass_reset.side_effect = MockPassError
            search.return_value = user
            with self.assertRaises(MockPassError):
                self.password_security_home.web_auth_reset_password()
'It should raise from failed _validate_pass_reset by email'
def test_web_auth_reset_password_fail_email(self):
    with self.mock_assets() as assets:
        with mock.patch.object(
                main.AuthSignupHome, 'get_auth_signup_qcontext',
                spec=dict) as qcontext:
            qcontext['login'] = 'login'
            search = assets['request'].env.sudo().search
            assets['request'].httprequest.method = 'POST'
            user = mock.MagicMock()
            user._validate_pass_reset.side_effect = MockPassError
            search.side_effect = [[], user]
            with self.assertRaises(MockPassError):
                self.password_security_home.web_auth_reset_password()
'It should return parent response on no validate errors'
def test_web_auth_reset_password_success(self):
    with self.mock_assets() as assets:
        with mock.patch.object(
                main.AuthSignupHome, 'get_auth_signup_qcontext',
                spec=dict) as qcontext:
            qcontext['login'] = 'login'
            assets['request'].httprequest.method = 'POST'
            res = self.password_security_home.web_auth_reset_password()
            self.assertEqual(assets['web_auth_reset_password'](), res)
'It provides a hook to disallow front-facing resets inside of the minimum threshold. Unfortunately, some core logic had to be reimplemented here because of nested logic in the parent.'
@http.route()
def web_auth_reset_password(self, *args, **kw):
    qcontext = self.get_auth_signup_qcontext()
    if (request.httprequest.method == 'POST' and qcontext.get('login') and
            'error' not in qcontext and 'token' not in qcontext):
        login = qcontext.get('login')
        user_ids = request.env.sudo().search(
            [('login', '=', login)], limit=1)
        if not user_ids:
            user_ids = request.env.sudo().search(
                [('email', '=', login)], limit=1)
        user_ids._validate_pass_reset()
    return super(PasswordSecurityHome, self).web_auth_reset_password(
        *args, **kw)
'Prepopulate the user table from one or more LDAP resources. Obviously, the option to create users must be toggled in the LDAP configuration. Return the number of users created (as far as we can tell).'
@api.multi
def action_populate(self):
    logger = logging.getLogger('orm.ldap')
    logger.debug('action_populate called on res.company.ldap ids %s',
                 self.ids)
    users_model = self.env['res.users']
    users_count_before = users_model.search_count([])
    deactivate_unknown, known_user_ids = self._check_users()
    if deactivate_unknown:
        logger.debug('will deactivate unknown users')
    for conf in self.get_ldap_dicts():
        if not conf['create_user']:
            continue
        attribute_match = re.search(
            '([a-zA-Z_]+)=\\%s', conf['ldap_filter'])
        if attribute_match:
            login_attr = attribute_match.group(1)
        else:
            raise UserError(
                _('No login attribute found: Could not extract login attribute from filter %s') %
                conf['ldap_filter'])
        results = self.get_ldap_entry_dicts(conf)
        for result in results:
            user_id = self.get_or_create_user(
                conf, result[1][login_attr][0], result)
            if not user_id:
                deactivate_unknown = False
            known_user_ids.append(user_id)
    users_created = users_model.search_count([]) - users_count_before
    deactivated_users_count = 0
    if deactivate_unknown:
        deactivated_users_count = self.do_deactivate_unknown_users(
            known_user_ids)
    logger.debug('%d users created', users_created)
    logger.debug('%d users deactivated', deactivated_users_count)
    return (users_created, deactivated_users_count)
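The login attribute extraction above relies on the %s placeholder in the configured LDAP filter. A small sketch with an assumed filter value shows what the regex captures:

import re

ldap_filter = '(uid=%s)'   # assumed value of conf['ldap_filter']
match = re.search('([a-zA-Z_]+)=\\%s', ldap_filter)
print(match.group(1))      # 'uid' -> used as the login attribute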
'Execute ldap query as defined in conf. Don\'t call self.query because it suppresses possible exceptions.'
def get_ldap_entry_dicts(self, conf, user_name='*'):
    ldap_filter = filter_format(conf['ldap_filter'] % user_name, ())
    conn = self.connect(conf)
    conn.simple_bind_s(conf['ldap_binddn'] or '',
                       conf['ldap_password'] or '')
    results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
                             ldap_filter.encode('utf8'), None, timeout=60)
    conn.unbind()
    return results
'Deactivate users not found in last populate run.'
def do_deactivate_unknown_users(self, known_user_ids):
    unknown_user_ids = []
    users = self.env['res.users'].search([('id', 'not in', known_user_ids)])
    for unknown_user in users:
        present_in_ldap = False
        for conf in self.get_ldap_dicts():
            present_in_ldap |= bool(
                self.get_ldap_entry_dicts(conf, user_name=unknown_user.login))
        if not present_in_ldap:
            unknown_user.active = False
            unknown_user_ids.append(unknown_user.id)
    return len(unknown_user_ids)
'GUI wrapper for the populate method that reports back the number of users created.'
@api.multi
def populate_wizard(self):
    if not self:
        return
    wizard_obj = self.env['res.company.ldap.populate_wizard']
    res_id = wizard_obj.create({'ldap_id': self.id}).id
    return {
        'name': wizard_obj._description,
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': wizard_obj._name,
        'domain': [],
        'context': self.env.context,
        'type': 'ir.actions.act_window',
        'target': 'new',
        'res_id': res_id,
        'nodestroy': True,
    }
'It should close the connection'
def test_connection_close_mssql(self):
    connection = mock.MagicMock()
    res = self.dbsource.connection_close_mssql(connection)
    self.assertEqual(res, connection.close())
'It should call SQLAlchemy open'
def test_connection_open_mssql(self):
    with mock.patch.object(
            self.dbsource, '_connection_open_sqlalchemy') as parent_method:
        self.dbsource.connection_open_mssql()
        parent_method.assert_called_once_with()
'It should pass args to SQLAlchemy execute'
def test_excecute_mssql(self):
    expect = ('sqlquery', 'sqlparams', 'metadata')
    with mock.patch.object(
            self.dbsource, '_execute_sqlalchemy') as parent_method:
        self.dbsource.execute_mssql(*expect)
        parent_method.assert_called_once_with(*expect)
'It should close the connection'
def test_connection_close_fdb(self):
    connection = mock.MagicMock()
    res = self.dbsource.connection_close_fdb(connection)
    self.assertEqual(res, connection.close())
'It should open the connection with the split conn string'
@mock.patch(ADAPTER)
def test_connection_open_fdb(self, fdb):
    self.dbsource.conn_string = 'User=User;'
    self.dbsource.connection_open_fdb()
    fdb.connect.assert_called_once_with(**{
        'user': 'User',
        'password': 'password',
    })
'It should return the newly opened connection'
@mock.patch(ADAPTER)
def test_connection_open_fdb_return(self, fdb):
    res = self.dbsource.connection_open_fdb()
    self.assertEqual(res, fdb.connect())
'It should preserve checksum_installed when cancelling upgrades'
def test_upgrade_module_cancel(self):
    self.own_module.write({'state': 'to upgrade'})
    self.own_module.checksum_installed = 'test'
    self.env['base.module.upgrade'].upgrade_module_cancel()
    self.assertEqual(
        self.own_module.checksum_installed, 'test',
        'Upgrade cancellation does not preserve checksum_installed')
'It should call update_list method on ir.module.module'
@mock.patch.object(Registry, 'new')
def test_upgrade_module(self, new_mock):
    update_list_mock = mock.MagicMock()
    self.env['ir.module.module']._patch_method(
        'update_list', update_list_mock)
    self.env['base.module.upgrade'].upgrade_module()
    update_list_mock.assert_called_once_with()
    self.env['ir.module.module']._revert_method('update_list')
'It should compute the directory\'s SHA-1 hash'
def test_compute_checksum_dir(self):
self.assertEqual(self.own_module.checksum_dir, self.own_checksum, 'Module directory checksum not computed properly')
'It should exclude .pyc/.pyo extensions from checksum calculations'
def test_compute_checksum_dir_ignore_excluded(self):
    with tempfile.NamedTemporaryFile(suffix='.pyc', dir=self.own_dir_path):
        self.assertEqual(
            self.own_module.checksum_dir, self.own_checksum,
            'SHA1 checksum does not ignore excluded extensions')
'It should return a different value when a non-.pyc/.pyo file is added to the module directory'
def test_compute_checksum_dir_recomputes_when_file_added(self):
    with tempfile.NamedTemporaryFile(suffix='.py', dir=self.own_dir_path):
        self.assertNotEqual(
            self.own_module.checksum_dir, self.own_checksum,
            'SHA1 checksum not recomputed')
'It should set the module\'s checksum_installed equal to checksum_dir when vals contain state \'installed\''
def test_store_checksum_installed_state_installed(self):
    self.own_module.checksum_installed = 'test'
    self.own_module._store_checksum_installed({'state': 'installed'})
    self.assertEqual(
        self.own_module.checksum_installed, self.own_module.checksum_dir,
        'Setting state to installed does not store checksum_dir as checksum_installed')
'It should clear the module\'s checksum_installed when vals contain state \'uninstalled\''
def test_store_checksum_installed_state_uninstalled(self):
    self.own_module.checksum_installed = 'test'
    self.own_module._store_checksum_installed({'state': 'uninstalled'})
    self.assertEqual(
        self.own_module.checksum_installed, False,
        'Setting state to uninstalled does not clear checksum_installed')
'It should not set checksum_installed to False or checksum_dir when a checksum_installed is included in vals'
def test_store_checksum_installed_vals_contain_checksum_installed(self):
    self.own_module.checksum_installed = 'test'
    self.own_module._store_checksum_installed({
        'state': 'installed',
        'checksum_installed': 'test',
    })
    self.assertEqual(
        self.own_module.checksum_installed, 'test',
        'Providing checksum_installed in vals did not prevent overwrite')
'It should not set checksum_installed to False or checksum_dir when self has context retain_checksum_installed=True'
def test_store_checksum_installed_with_retain_context(self):
    self.own_module.checksum_installed = 'test'
    self.own_module.with_context(retain_checksum_installed=True)._store_checksum_installed({'state': 'installed'})
    self.assertEqual(
        self.own_module.checksum_installed, 'test',
        'Providing retain_checksum_installed context did not prevent overwrite')
'It should preserve checksum_installed when cancelling uninstall'
def test_button_uninstall_cancel(self):
    self.own_module.write({'state': 'to remove'})
    self.own_module.checksum_installed = 'test'
    self.own_module.button_uninstall_cancel()
    self.assertEqual(
        self.own_module.checksum_installed, 'test',
        'Uninstall cancellation does not preserve checksum_installed')
'It should preserve checksum_installed when cancelling upgrades'
def test_button_upgrade_cancel(self):
    self.own_module.write({'state': 'to upgrade'})
    self.own_module.checksum_installed = 'test'
    self.own_module.button_upgrade_cancel()
    self.assertEqual(
        self.own_module.checksum_installed, 'test',
        'Upgrade cancellation does not preserve checksum_installed')
'It should call _store_checksum_installed method'
def test_create(self):
    _store_checksum_installed_mock = mock.MagicMock()
    self.env['ir.module.module']._patch_method(
        '_store_checksum_installed', _store_checksum_installed_mock)
    vals = {'name': 'module_auto_update_test_module', 'state': 'installed'}
    self.create_test_module(vals)
    _store_checksum_installed_mock.assert_called_once_with(vals)
    self.env['ir.module.module']._revert_method('_store_checksum_installed')
'It should change the state of modules with different checksum_dir and checksum_installed to \'to upgrade\''
@mock.patch('%s.get_module_path' % model)
def test_update_list(self, get_module_path_mock):
    get_module_path_mock.return_value = self.own_dir_path
    vals = {'name': 'module_auto_update_test_module', 'state': 'installed'}
    test_module = self.create_test_module(vals)
    test_module.checksum_installed = 'test'
    self.env['ir.module.module'].update_list()
    self.assertEqual(
        test_module.state, 'to upgrade',
        'List update does not mark upgradeable modules "to upgrade"')
'It should not change the state of a module with a former state other than \'installed\' to \'to upgrade\''
def test_update_list_only_changes_installed(self):
    vals = {'name': 'module_auto_update_test_module', 'state': 'uninstalled'}
    test_module = self.create_test_module(vals)
    self.env['ir.module.module'].update_list()
    self.assertNotEqual(
        test_module.state, 'to upgrade',
        'List update changed state of an uninstalled module')
'It should call _store_checksum_installed method'
def test_write(self):
    _store_checksum_installed_mock = mock.MagicMock()
    self.env['ir.module.module']._patch_method(
        '_store_checksum_installed', _store_checksum_installed_mock)
    vals = {'state': 'installed'}
    self.own_module.write(vals)
    _store_checksum_installed_mock.assert_called_once_with(vals)
    self.env['ir.module.module']._revert_method('_store_checksum_installed')
'It should set checksum_installed equal to checksum_dir for all installed modules'
def test_post_init_hook(self):
    installed_modules = self.env['ir.module.module'].search(
        [('state', '=', 'installed')])
    post_init_hook(self.env.cr, None)
    self.assertListEqual(
        installed_modules.mapped('checksum_dir'),
        installed_modules.mapped('checksum_installed'),
        'Installed modules did not have checksum_installed stored')
'Set up a default URL.'
@api.model
def _install_default_url(self):
    conf = self.env['ir.config_parameter']
    name = 'dead_mans_switch_client.url'
    param = conf.get_param(name)
    if not param:
        url = '{}/dead_mans_switch/alive'.format(
            conf.get_param(
                'report.url',
                conf.get_param('web.base.url', 'http://localhost')))
        conf.set_param(name, url)
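For reference, a plain-Python sketch of the fallback chain used above, with assumed parameter values; get_param here is only a stand-in for ir.config_parameter.get_param.

def get_param(params, name, default=None):
    # Stand-in for ir.config_parameter.get_param, illustration only.
    return params.get(name, default)

params = {'web.base.url': 'https://odoo.example.com'}   # assumed values
base = get_param(params, 'report.url',
                 get_param(params, 'web.base.url', 'http://localhost'))
print('{}/dead_mans_switch/alive'.format(base))
# -> https://odoo.example.com/dead_mans_switch/alive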
'It should close the connection'
def test_connection_close_mysql(self):
    connection = mock.MagicMock()
    res = self.dbsource.connection_close_mysql(connection)
    self.assertEqual(res, connection.close())
'It should call SQLAlchemy open'
def test_connection_open_mysql(self):
    with mock.patch.object(
            self.dbsource, '_connection_open_sqlalchemy') as parent_method:
        self.dbsource.connection_open_mysql()
        parent_method.assert_called_once_with()
'It should pass args to SQLAlchemy execute'
def test_excecute_mysql(self):
    expect = ('sqlquery', 'sqlparams', 'metadata')
    with mock.patch.object(
            self.dbsource, '_execute_sqlalchemy') as parent_method:
        self.dbsource.execute_mysql(*expect)
        parent_method.assert_called_once_with(*expect)
'Override write to ensure that membership of the Threshold Manager group cannot be granted by users outside that group.'
@api.multi
def write(self, vals):
    manager = self.env.ref(THRESHOLD_MANAGER, raise_if_not_found=False)
    if manager:
        is_manager = self.env.user.has_group(THRESHOLD_MANAGER)
        if not is_manager and manager in self:
            raise AccessError(_('You must be a member of the `User Threshold Manager` group to grant access to it.'))
    return super(ResGroups, self).write(vals)
'Override to check whether the env var that hides the threshold configuration and resets the database state is set. If it is, run those actions.'
def __init__(self, pool, cr):
    if THRESHOLD_HIDE:
        exempt_users_var = os.environ.get('USER_THRESHOLD_USER', '')
        exempt_users = reader([exempt_users_var])
        with api.Environment.manage():
            with registry(cr.dbname).cursor() as new_cr:
                new_env = api.Environment(new_cr, SUPERUSER_ID, {})
                installed = new_env['ir.module.module'].search_count([
                    ('name', '=', 'user_threshold'),
                    ('state', '=', 'installed'),
                ])
                if installed:
                    users = new_env['res.users'].search([
                        ('share', '=', False),
                        ('threshold_exempt', '=', True),
                    ])
                    non_ex = users.filtered(
                        lambda r: r.login not in exempt_users)
                    for user in non_ex:
                        user.threshold_exempt = False
                    new_cr.commit()
'Check to see if any user thresholds are met. Returns: False when the thresholds aren\'t met and True when they are.'
def _check_thresholds(self):
    domain = [('threshold_exempt', '=', False), ('share', '=', False)]
    users = self.env['res.users'].search(domain)
    max_db_users = int(
        self.env['ir.config_parameter'].get_param(MAX_DB_USER_PARAM))
    if max_db_users > 0 and len(users) >= max_db_users:
        return True
    company = self.env.user.company_id
    company_users = users.filtered(lambda r: r.company_id.id == company.id)
    if company.max_users > 0 and len(company_users) >= company.max_users:
        return True
    return False
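A hedged sketch of how the database-wide limit consulted above could be configured from an Odoo shell; the parameter key is whatever MAX_DB_USER_PARAM resolves to in this module, and the key string below is only an assumption.

# Illustrative only: 'env' is an Odoo shell environment, key name assumed.
env['ir.config_parameter'].set_param('user_threshold.db_max_users', '25')
# With 25 or more non-exempt internal users, _check_thresholds() returns True.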
'Override method to make sure the Thresholds aren\'t met before creating a new user'
@api.multi
def copy(self, default=None):
    if self._check_thresholds():
        raise ValidationError(_('Cannot add user - Maximum number of allowed users reached'))
    return super(ResUsers, self).copy(default=default)
'Override method to make sure the Thresholds aren\'t met before creating a new user'
@api.multi
def create(self, vals):
    if self._check_thresholds():
        raise ValidationError(_('Cannot add user - Maximum number of allowed users reached'))
    return super(ResUsers, self).create(vals)